xref: /openbmc/linux/drivers/mmc/host/sdhci-tegra.c (revision 7587cdef)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2010 Google, Inc.
4  */
5 
6 #include <linux/delay.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/err.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_device.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/pm_opp.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/reset.h>
22 #include <linux/mmc/card.h>
23 #include <linux/mmc/host.h>
24 #include <linux/mmc/mmc.h>
25 #include <linux/mmc/slot-gpio.h>
26 #include <linux/gpio/consumer.h>
27 #include <linux/ktime.h>
28 
29 #include <soc/tegra/common.h>
30 
31 #include "sdhci-pltfm.h"
32 #include "cqhci.h"
33 
34 /* Tegra SDHOST controller vendor register definitions */
35 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
36 #define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
37 #define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
38 #define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
39 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
40 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
41 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
42 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
43 
44 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
45 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
46 
47 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
48 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
49 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
50 
51 #define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
52 #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
53 #define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
54 #define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
55 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
56 #define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
57 
58 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
59 #define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
60 
61 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
62 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
63 
64 #define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
65 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
66 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
67 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
68 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
69 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
70 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
71 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
72 #define TRIES_128					2
73 #define TRIES_256					4
74 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
75 
76 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
77 #define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
78 #define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
79 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
80 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
81 #define TUNING_WORD_BIT_SIZE				32
82 
83 #define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
84 #define SDHCI_AUTO_CAL_START				BIT(31)
85 #define SDHCI_AUTO_CAL_ENABLE				BIT(29)
86 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
87 
88 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
89 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
90 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
91 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
92 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
93 
94 #define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
95 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
96 
97 #define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
98 #define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
99 #define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
100 #define NVQUIRK_ENABLE_SDR50				BIT(3)
101 #define NVQUIRK_ENABLE_SDR104				BIT(4)
102 #define NVQUIRK_ENABLE_DDR50				BIT(5)
103 /*
104  * HAS_PADCALIB NVQUIRK is for SoCs supporting auto-calibration of pad
105  * drive strength.
106  */
107 #define NVQUIRK_HAS_PADCALIB				BIT(6)
108 /*
109  * NEEDS_PAD_CONTROL NVQUIRK is for SoCs having separate 3V3 and 1V8 pads.
110  * 3V3/1V8 pad selection happens through pinctrl state selection depending
111  * on the signaling mode.
112  */
113 #define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
114 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
115 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
116 
117 /*
118  * NVQUIRK_HAS_TMCLK is for SoCs having a separate timeout clock for the
119  * Tegra SDMMC hardware data timeout.
120  */
121 #define NVQUIRK_HAS_TMCLK				BIT(10)
122 
123 #define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
124 
125 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
126 #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
127 
128 #define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
129 					 SDHCI_TRNS_BLK_CNT_EN | \
130 					 SDHCI_TRNS_DMA)
131 
132 struct sdhci_tegra_soc_data {
133 	const struct sdhci_pltfm_data *pdata;
134 	u64 dma_mask;
135 	u32 nvquirks;
136 	u8 min_tap_delay;
137 	u8 max_tap_delay;
138 };
139 
140 /* Magic pull up and pull down pad calibration offsets */
141 struct sdhci_tegra_autocal_offsets {
142 	u32 pull_up_3v3;
143 	u32 pull_down_3v3;
144 	u32 pull_up_3v3_timeout;
145 	u32 pull_down_3v3_timeout;
146 	u32 pull_up_1v8;
147 	u32 pull_down_1v8;
148 	u32 pull_up_1v8_timeout;
149 	u32 pull_down_1v8_timeout;
150 	u32 pull_up_sdr104;
151 	u32 pull_down_sdr104;
152 	u32 pull_up_hs400;
153 	u32 pull_down_hs400;
154 };
155 
156 struct sdhci_tegra {
157 	const struct sdhci_tegra_soc_data *soc_data;
158 	struct gpio_desc *power_gpio;
159 	struct clk *tmclk;
160 	bool ddr_signaling;
161 	bool pad_calib_required;
162 	bool pad_control_available;
163 
164 	struct reset_control *rst;
165 	struct pinctrl *pinctrl_sdmmc;
166 	struct pinctrl_state *pinctrl_state_3v3;
167 	struct pinctrl_state *pinctrl_state_1v8;
168 	struct pinctrl_state *pinctrl_state_3v3_drv;
169 	struct pinctrl_state *pinctrl_state_1v8_drv;
170 
171 	struct sdhci_tegra_autocal_offsets autocal_offsets;
172 	ktime_t last_calib;
173 
174 	u32 default_tap;
175 	u32 default_trim;
176 	u32 dqs_trim;
177 	bool enable_hwcq;
178 	unsigned long curr_clk_rate;
179 	u8 tuned_tap_delay;
180 };
181 
182 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
183 {
184 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
185 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
186 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
187 
188 	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
189 			(reg == SDHCI_HOST_VERSION))) {
190 		/* Erratum: Version register is invalid in HW. */
191 		return SDHCI_SPEC_200;
192 	}
193 
194 	return readw(host->ioaddr + reg);
195 }
196 
197 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
198 {
199 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
200 
201 	switch (reg) {
202 	case SDHCI_TRANSFER_MODE:
203 		/*
204 		 * Postpone this write; it must be done together with the
205 		 * command write below.
206 		 */
207 		pltfm_host->xfer_mode_shadow = val;
208 		return;
209 	case SDHCI_COMMAND:
210 		writel((val << 16) | pltfm_host->xfer_mode_shadow,
211 			host->ioaddr + SDHCI_TRANSFER_MODE);
212 		return;
213 	}
214 
215 	writew(val, host->ioaddr + reg);
216 }
217 
218 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
219 {
220 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
221 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
222 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
223 
224 	/* We seem to get spurious timeout and CRC errors, so disable
225 	 * signalling of them. In case of real errors, software
226 	 * timers should take care of eventually detecting them.
227 	 */
228 	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
229 		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
230 
231 	writel(val, host->ioaddr + reg);
232 
233 	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
234 			(reg == SDHCI_INT_ENABLE))) {
235 		/* Erratum: Must enable block gap interrupt detection */
236 		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
237 		if (val & SDHCI_INT_CARD_INT)
238 			gap_ctrl |= 0x8;
239 		else
240 			gap_ctrl &= ~0x8;
241 		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
242 	}
243 }
244 
245 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
246 {
247 	bool status;
248 	u32 reg;
249 
250 	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
251 	status = !!(reg & SDHCI_CLOCK_CARD_EN);
252 
253 	if (status == enable)
254 		return status;
255 
256 	if (enable)
257 		reg |= SDHCI_CLOCK_CARD_EN;
258 	else
259 		reg &= ~SDHCI_CLOCK_CARD_EN;
260 
261 	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
262 
263 	return status;
264 }
265 
266 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
267 {
268 	bool is_tuning_cmd = false;
269 	bool clk_enabled;
270 	u8 cmd;
271 
272 	if (reg == SDHCI_COMMAND) {
273 		cmd = SDHCI_GET_CMD(val);
274 		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
275 				cmd == MMC_SEND_TUNING_BLOCK_HS200;
276 	}
277 
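	/*
	 * Tuning commands are issued with the card clock gated; the CMD and
	 * DATA lines are reset and the clock restored once the write is done.
	 */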
278 	if (is_tuning_cmd)
279 		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
280 
281 	writew(val, host->ioaddr + reg);
282 
283 	if (is_tuning_cmd) {
284 		udelay(1);
285 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
286 		tegra_sdhci_configure_card_clk(host, clk_enabled);
287 	}
288 }
289 
290 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
291 {
292 	/*
293 	 * Write-enable shall be assumed if GPIO is missing in a board's
294 	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
295 	 * Tegra.
296 	 */
297 	return mmc_gpio_get_ro(host->mmc);
298 }
299 
300 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
301 {
302 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
303 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
304 	int has_1v8, has_3v3;
305 
306 	/*
307 	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
308 	 * voltage configuration in order to perform voltage switching. This
309 	 * means that valid pinctrl info is required on SDHCI instances capable
310 	 * of performing voltage switching. Whether or not an SDHCI instance is
311 	 * capable of voltage switching is determined based on the regulator.
312 	 */
313 
314 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
315 		return true;
316 
317 	if (IS_ERR(host->mmc->supply.vqmmc))
318 		return false;
319 
320 	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
321 						 1700000, 1950000);
322 
323 	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
324 						 2700000, 3600000);
325 
326 	if (has_1v8 == 1 && has_3v3 == 1)
327 		return tegra_host->pad_control_available;
328 
329 	/* Fixed voltage, no pad control required. */
330 	return true;
331 }
332 
333 static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
334 {
335 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
336 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
337 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
338 	bool card_clk_enabled = false;
339 	u32 reg;
340 
341 	/*
342 	 * Touching the tap values is a bit tricky on some SoC generations.
343 	 * The quirk enables a workaround for a glitch that sometimes occurs if
344 	 * the tap values are changed.
345 	 */
346 
347 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
348 		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
349 
350 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
351 	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
352 	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
353 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
354 
355 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
356 	    card_clk_enabled) {
357 		udelay(1);
358 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
359 		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
360 	}
361 }
362 
363 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
364 {
365 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
366 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
367 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
368 	u32 misc_ctrl, clk_ctrl, pad_ctrl;
369 
370 	sdhci_reset(host, mask);
371 
372 	if (!(mask & SDHCI_RESET_ALL))
373 		return;
374 
375 	tegra_sdhci_set_tap(host, tegra_host->default_tap);
376 
377 	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
378 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
379 
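	/* Clear the UHS mode advertisements and trim value; reapplied below */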
380 	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
381 		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
382 		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
383 		       SDHCI_MISC_CTRL_ENABLE_SDR104);
384 
385 	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
386 		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
387 
388 	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
389 		/* Erratum: Enable SDHCI spec v3.00 support */
390 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
391 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
392 		/* Advertise UHS modes as supported by host */
393 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
394 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
395 		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
396 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
397 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
398 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
399 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
400 			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
401 	}
402 
403 	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
404 
405 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
406 	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
407 
408 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
409 		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
410 		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
411 		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
412 		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
413 
414 		tegra_host->pad_calib_required = true;
415 	}
416 
417 	tegra_host->ddr_signaling = false;
418 }
419 
420 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
421 {
422 	u32 val;
423 
424 	/*
425 	 * Enable or disable the additional I/O pad used by the drive strength
426 	 * calibration process.
427 	 */
428 	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
429 
430 	if (enable)
431 		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
432 	else
433 		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
434 
435 	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
436 
437 	if (enable)
438 		usleep_range(1, 2);
439 }
440 
441 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
442 					       u16 pdpu)
443 {
444 	u32 reg;
445 
446 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
447 	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
448 	reg |= pdpu;
449 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
450 }
451 
452 static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
453 				   bool state_drvupdn)
454 {
455 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
456 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
457 	struct sdhci_tegra_autocal_offsets *offsets =
458 						&tegra_host->autocal_offsets;
459 	struct pinctrl_state *pinctrl_drvupdn = NULL;
460 	int ret = 0;
461 	u8 drvup = 0, drvdn = 0;
462 	u32 reg;
463 
464 	if (!state_drvupdn) {
465 		/* PADS Drive Strength */
466 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
467 			if (tegra_host->pinctrl_state_1v8_drv) {
468 				pinctrl_drvupdn =
469 					tegra_host->pinctrl_state_1v8_drv;
470 			} else {
471 				drvup = offsets->pull_up_1v8_timeout;
472 				drvdn = offsets->pull_down_1v8_timeout;
473 			}
474 		} else {
475 			if (tegra_host->pinctrl_state_3v3_drv) {
476 				pinctrl_drvupdn =
477 					tegra_host->pinctrl_state_3v3_drv;
478 			} else {
479 				drvup = offsets->pull_up_3v3_timeout;
480 				drvdn = offsets->pull_down_3v3_timeout;
481 			}
482 		}
483 
484 		if (pinctrl_drvupdn != NULL) {
485 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
486 							pinctrl_drvupdn);
487 			if (ret < 0)
488 				dev_err(mmc_dev(host->mmc),
489 					"failed pads drvupdn, ret: %d\n", ret);
490 		} else if ((drvup) || (drvdn)) {
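			/* Fall back to fixed fail-safe offsets in the DRVUPDN field */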
491 			reg = sdhci_readl(host,
492 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
493 			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
494 			reg |= (drvup << 20) | (drvdn << 12);
495 			sdhci_writel(host, reg,
496 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
497 		}
498 
499 	} else {
500 		/* Dual Voltage PADS Voltage selection */
501 		if (!tegra_host->pad_control_available)
502 			return 0;
503 
504 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
505 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
506 						tegra_host->pinctrl_state_1v8);
507 			if (ret < 0)
508 				dev_err(mmc_dev(host->mmc),
509 					"setting 1.8V failed, ret: %d\n", ret);
510 		} else {
511 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
512 						tegra_host->pinctrl_state_3v3);
513 			if (ret < 0)
514 				dev_err(mmc_dev(host->mmc),
515 					"setting 3.3V failed, ret: %d\n", ret);
516 		}
517 	}
518 
519 	return ret;
520 }
521 
522 static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
523 {
524 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
525 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
526 	struct sdhci_tegra_autocal_offsets offsets =
527 			tegra_host->autocal_offsets;
528 	struct mmc_ios *ios = &host->mmc->ios;
529 	bool card_clk_enabled;
530 	u16 pdpu;
531 	u32 reg;
532 	int ret;
533 
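	/*
	 * Pick the pull-down/pull-up auto-calibration offsets for the current
	 * timing and signaling voltage: the pull-down offset goes into bits
	 * [15:8] and the pull-up offset into bits [7:0] of the PDPU value.
	 */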
534 	switch (ios->timing) {
535 	case MMC_TIMING_UHS_SDR104:
536 		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
537 		break;
538 	case MMC_TIMING_MMC_HS400:
539 		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
540 		break;
541 	default:
542 		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
543 			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
544 		else
545 			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
546 	}
547 
548 	/* Set initial offset before auto-calibration */
549 	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
550 
551 	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
552 
553 	tegra_sdhci_configure_cal_pad(host, true);
554 
555 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
556 	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
557 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
558 
559 	usleep_range(1, 2);
560 	/* 10 ms timeout */
561 	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
562 				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
563 				 1000, 10000);
564 
565 	tegra_sdhci_configure_cal_pad(host, false);
566 
567 	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
568 
569 	if (ret) {
570 		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
571 
572 		/* Disable automatic cal and use fixed Drive Strengths */
573 		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
574 		reg &= ~SDHCI_AUTO_CAL_ENABLE;
575 		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
576 
577 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
578 		if (ret < 0)
579 			dev_err(mmc_dev(host->mmc),
580 				"Setting drive strengths failed: %d\n", ret);
581 	}
582 }
583 
584 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
585 {
586 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
587 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
588 	struct sdhci_tegra_autocal_offsets *autocal =
589 			&tegra_host->autocal_offsets;
590 	int err;
591 
592 	err = device_property_read_u32(mmc_dev(host->mmc),
593 			"nvidia,pad-autocal-pull-up-offset-3v3",
594 			&autocal->pull_up_3v3);
595 	if (err)
596 		autocal->pull_up_3v3 = 0;
597 
598 	err = device_property_read_u32(mmc_dev(host->mmc),
599 			"nvidia,pad-autocal-pull-down-offset-3v3",
600 			&autocal->pull_down_3v3);
601 	if (err)
602 		autocal->pull_down_3v3 = 0;
603 
604 	err = device_property_read_u32(mmc_dev(host->mmc),
605 			"nvidia,pad-autocal-pull-up-offset-1v8",
606 			&autocal->pull_up_1v8);
607 	if (err)
608 		autocal->pull_up_1v8 = 0;
609 
610 	err = device_property_read_u32(mmc_dev(host->mmc),
611 			"nvidia,pad-autocal-pull-down-offset-1v8",
612 			&autocal->pull_down_1v8);
613 	if (err)
614 		autocal->pull_down_1v8 = 0;
615 
616 	err = device_property_read_u32(mmc_dev(host->mmc),
617 			"nvidia,pad-autocal-pull-up-offset-sdr104",
618 			&autocal->pull_up_sdr104);
619 	if (err)
620 		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
621 
622 	err = device_property_read_u32(mmc_dev(host->mmc),
623 			"nvidia,pad-autocal-pull-down-offset-sdr104",
624 			&autocal->pull_down_sdr104);
625 	if (err)
626 		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
627 
628 	err = device_property_read_u32(mmc_dev(host->mmc),
629 			"nvidia,pad-autocal-pull-up-offset-hs400",
630 			&autocal->pull_up_hs400);
631 	if (err)
632 		autocal->pull_up_hs400 = autocal->pull_up_1v8;
633 
634 	err = device_property_read_u32(mmc_dev(host->mmc),
635 			"nvidia,pad-autocal-pull-down-offset-hs400",
636 			&autocal->pull_down_hs400);
637 	if (err)
638 		autocal->pull_down_hs400 = autocal->pull_down_1v8;
639 
640 	/*
641 	 * Different fail-safe drive strength values based on the signaling
642 	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
643 	 * So, avoid reading the device tree properties below for SoCs that don't
644 	 * have NVQUIRK_NEEDS_PAD_CONTROL.
645 	 */
646 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
647 		return;
648 
649 	err = device_property_read_u32(mmc_dev(host->mmc),
650 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
651 			&autocal->pull_up_3v3_timeout);
652 	if (err) {
653 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
654 			(tegra_host->pinctrl_state_3v3_drv == NULL))
655 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
656 				mmc_hostname(host->mmc));
657 		autocal->pull_up_3v3_timeout = 0;
658 	}
659 
660 	err = device_property_read_u32(mmc_dev(host->mmc),
661 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
662 			&autocal->pull_down_3v3_timeout);
663 	if (err) {
664 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
665 			(tegra_host->pinctrl_state_3v3_drv == NULL))
666 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
667 				mmc_hostname(host->mmc));
668 		autocal->pull_down_3v3_timeout = 0;
669 	}
670 
671 	err = device_property_read_u32(mmc_dev(host->mmc),
672 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
673 			&autocal->pull_up_1v8_timeout);
674 	if (err) {
675 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
676 			(tegra_host->pinctrl_state_1v8_drv == NULL))
677 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
678 				mmc_hostname(host->mmc));
679 		autocal->pull_up_1v8_timeout = 0;
680 	}
681 
682 	err = device_property_read_u32(mmc_dev(host->mmc),
683 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
684 			&autocal->pull_down_1v8_timeout);
685 	if (err) {
686 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
687 			(tegra_host->pinctrl_state_1v8_drv == NULL))
688 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
689 				mmc_hostname(host->mmc));
690 		autocal->pull_down_1v8_timeout = 0;
691 	}
692 }
693 
694 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
695 {
696 	struct sdhci_host *host = mmc_priv(mmc);
697 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
698 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
699 	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
700 
701 	/* 100 ms calibration interval is specified in the TRM */
702 	if (ktime_to_ms(since_calib) > 100) {
703 		tegra_sdhci_pad_autocalib(host);
704 		tegra_host->last_calib = ktime_get();
705 	}
706 
707 	sdhci_request(mmc, mrq);
708 }
709 
710 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
711 {
712 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
713 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
714 	int err;
715 
716 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
717 				       &tegra_host->default_tap);
718 	if (err)
719 		tegra_host->default_tap = 0;
720 
721 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
722 				       &tegra_host->default_trim);
723 	if (err)
724 		tegra_host->default_trim = 0;
725 
726 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
727 				       &tegra_host->dqs_trim);
728 	if (err)
729 		tegra_host->dqs_trim = 0x11;
730 }
731 
732 static void tegra_sdhci_parse_dt(struct sdhci_host *host)
733 {
734 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
735 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
736 
737 	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
738 		tegra_host->enable_hwcq = true;
739 	else
740 		tegra_host->enable_hwcq = false;
741 
742 	tegra_sdhci_parse_pad_autocal_dt(host);
743 	tegra_sdhci_parse_tap_and_trim(host);
744 }
745 
746 static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
747 {
748 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
749 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
750 	struct device *dev = mmc_dev(host->mmc);
751 	unsigned long host_clk;
752 	int err;
753 
754 	if (!clock)
755 		return sdhci_set_clock(host, clock);
756 
757 	/*
758 	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
759 	 * divider to be configured to divide the host clock by two. The SDHCI
760 	 * clock divider is calculated as part of sdhci_set_clock() by
761 	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
762 	 * the requested clock rate.
763 	 *
764 	 * By setting the host->max_clk to clock * 2 the divider calculation
765 	 * will always result in the correct value for DDR50/52 modes,
766 	 * regardless of clock rate rounding, which may happen if the value
767 	 * from clk_get_rate() is used.
768 	 */
769 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
770 
771 	err = dev_pm_opp_set_rate(dev, host_clk);
772 	if (err)
773 		dev_err(dev, "failed to set clk rate to %luHz: %d\n",
774 			host_clk, err);
775 
776 	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
777 	if (tegra_host->ddr_signaling)
778 		host->max_clk = host_clk;
779 	else
780 		host->max_clk = clk_get_rate(pltfm_host->clk);
781 
782 	sdhci_set_clock(host, clock);
783 
784 	if (tegra_host->pad_calib_required) {
785 		tegra_sdhci_pad_autocalib(host);
786 		tegra_host->pad_calib_required = false;
787 	}
788 }
789 
790 static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
791 					      struct mmc_ios *ios)
792 {
793 	struct sdhci_host *host = mmc_priv(mmc);
794 	u32 val;
795 
796 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
797 
798 	if (ios->enhanced_strobe) {
799 		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
800 		/*
801 		 * When CMD13 is sent from mmc_select_hs400es() after
802 		 * switching to HS400ES mode, the bus is operating at
803 		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
804 		 * To meet the Tegra SDHCI requirement in HS400ES mode, force the
805 		 * SDHCI interface clock to MMC_HS200_MAX_DTR (200 MHz) so that the
806 		 * host controller CAR clock and the interface clock are rate matched.
807 		 */
808 		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
809 	} else {
810 		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
811 	}
812 
813 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
814 }
815 
816 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
817 {
818 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
819 
820 	return clk_round_rate(pltfm_host->clk, UINT_MAX);
821 }
822 
823 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
824 {
825 	u32 val;
826 
827 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
828 	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
829 	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
830 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
831 }
832 
833 static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
834 {
835 	u32 reg;
836 	int err;
837 
838 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
839 	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
840 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
841 
842 	/* 1 ms sleep, 5 ms timeout */
843 	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
844 				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
845 				 1000, 5000);
846 	if (err)
847 		dev_err(mmc_dev(host->mmc),
848 			"HS400 delay line calibration timed out\n");
849 }
850 
851 static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
852 				       u8 thd_low, u8 fixed_tap)
853 {
854 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
855 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
856 	u32 val, tun_status;
857 	u8 word, bit, edge1, tap, window;
858 	bool tap_result;
859 	bool start_fail = false;
860 	bool start_pass = false;
861 	bool end_pass = false;
862 	bool first_fail = false;
863 	bool first_pass = false;
864 	u8 start_pass_tap = 0;
865 	u8 end_pass_tap = 0;
866 	u8 first_fail_tap = 0;
867 	u8 first_pass_tap = 0;
868 	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
869 
870 	/*
871 	 * Read the auto-tuned results and extract a good, valid passing window
872 	 * by filtering out unwanted bubble/partial/merged windows.
873 	 */
874 	for (word = 0; word < total_tuning_words; word++) {
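		/* Select which 32-bit tuning status word to read back */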
875 		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
876 		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
877 		val |= word;
878 		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
879 		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
880 		bit = 0;
881 		while (bit < TUNING_WORD_BIT_SIZE) {
882 			tap = word * TUNING_WORD_BIT_SIZE + bit;
883 			tap_result = tun_status & (1 << bit);
884 			if (!tap_result && !start_fail) {
885 				start_fail = true;
886 				if (!first_fail) {
887 					first_fail_tap = tap;
888 					first_fail = true;
889 				}
890 
891 			} else if (tap_result && start_fail && !start_pass) {
892 				start_pass_tap = tap;
893 				start_pass = true;
894 				if (!first_pass) {
895 					first_pass_tap = tap;
896 					first_pass = true;
897 				}
898 
899 			} else if (!tap_result && start_fail && start_pass &&
900 				   !end_pass) {
901 				end_pass_tap = tap - 1;
902 				end_pass = true;
903 			} else if (tap_result && start_pass && start_fail &&
904 				   end_pass) {
905 				window = end_pass_tap - start_pass_tap;
906 				/* discard merged window and bubble window */
907 				if (window >= thd_up || window < thd_low) {
908 					start_pass_tap = tap;
909 					end_pass = false;
910 				} else {
911 					/* set tap at middle of valid window */
912 					tap = start_pass_tap + window / 2;
913 					tegra_host->tuned_tap_delay = tap;
914 					return;
915 				}
916 			}
917 
918 			bit++;
919 		}
920 	}
921 
922 	if (!first_fail) {
923 		WARN(1, "no edge detected, continue with hw tuned delay.\n");
924 	} else if (first_pass) {
925 		/* set tap location at fixed tap relative to the first edge */
926 		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
927 		if (edge1 - 1 > fixed_tap)
928 			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
929 		else
930 			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
931 	}
932 }
933 
934 static void tegra_sdhci_post_tuning(struct sdhci_host *host)
935 {
936 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
937 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
938 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
939 	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
940 	u8 fixed_tap, start_tap, end_tap, window_width;
941 	u8 thdupper, thdlower;
942 	u8 num_iter;
943 	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
944 
945 	/* retain the HW tuned tap to use in case no correction is needed */
946 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
947 	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
948 				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
949 	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
950 		min_tap_dly = soc_data->min_tap_delay;
951 		max_tap_dly = soc_data->max_tap_delay;
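		/* Derive the clock period in picoseconds from the current rate */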
952 		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
953 		period_ps = USEC_PER_SEC / clk_rate_mhz;
954 		bestcase = period_ps / min_tap_dly;
955 		worstcase = period_ps / max_tap_dly;
956 		/*
957 		 * Upper and Lower bound thresholds used to detect merged and
958 		 * bubble windows
959 		 */
960 		thdupper = (2 * worstcase + bestcase) / 2;
961 		thdlower = worstcase / 4;
962 		/*
963 		 * A fixed tap is used when the HW tuning result contains a single
964 		 * edge; the tap is then set at a fixed delay relative to the first edge.
965 		 */
966 		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
967 		fixed_tap = avg_tap_dly / 2;
968 
969 		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
970 		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
971 		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
972 			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
973 		window_width = end_tap - start_tap;
974 		num_iter = host->tuning_loop_count;
975 		/*
976 		 * A partial window includes the edges of the tuning range.
977 		 * A merged window includes more taps, so its width exceeds the
978 		 * upper threshold.
979 		 */
980 		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
981 		    (end_tap == num_iter - 2) || window_width >= thdupper) {
982 			pr_debug("%s: Apply tuning correction\n",
983 				 mmc_hostname(host->mmc));
984 			tegra_sdhci_tap_correction(host, thdupper, thdlower,
985 						   fixed_tap);
986 		}
987 	}
988 
989 	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
990 }
991 
992 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
993 {
994 	struct sdhci_host *host = mmc_priv(mmc);
995 	int err;
996 
997 	err = sdhci_execute_tuning(mmc, opcode);
998 	if (!err && !host->tuning_err)
999 		tegra_sdhci_post_tuning(host);
1000 
1001 	return err;
1002 }
1003 
1004 static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
1005 					  unsigned timing)
1006 {
1007 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1008 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1009 	bool set_default_tap = false;
1010 	bool set_dqs_trim = false;
1011 	bool do_hs400_dll_cal = false;
1012 	u8 iter = TRIES_256;
1013 	u32 val;
1014 
1015 	tegra_host->ddr_signaling = false;
1016 	switch (timing) {
1017 	case MMC_TIMING_UHS_SDR50:
1018 		break;
1019 	case MMC_TIMING_UHS_SDR104:
1020 	case MMC_TIMING_MMC_HS200:
1021 		/* Don't set default tap on tunable modes. */
1022 		iter = TRIES_128;
1023 		break;
1024 	case MMC_TIMING_MMC_HS400:
1025 		set_dqs_trim = true;
1026 		do_hs400_dll_cal = true;
1027 		iter = TRIES_128;
1028 		break;
1029 	case MMC_TIMING_MMC_DDR52:
1030 	case MMC_TIMING_UHS_DDR50:
1031 		tegra_host->ddr_signaling = true;
1032 		set_default_tap = true;
1033 		break;
1034 	default:
1035 		set_default_tap = true;
1036 		break;
1037 	}
1038 
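	/*
	 * Program the HW tuning engine: number of tuning iterations, start
	 * tap value 0 and multiplier M of 1.
	 */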
1039 	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1040 	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1041 		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1042 		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1043 	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1044 		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1045 		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1046 	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1047 	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1048 
1049 	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1050 
1051 	sdhci_set_uhs_signaling(host, timing);
1052 
1053 	tegra_sdhci_pad_autocalib(host);
1054 
1055 	if (tegra_host->tuned_tap_delay && !set_default_tap)
1056 		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1057 	else
1058 		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1059 
1060 	if (set_dqs_trim)
1061 		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1062 
1063 	if (do_hs400_dll_cal)
1064 		tegra_sdhci_hs400_dll_cal(host);
1065 }
1066 
1067 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1068 {
1069 	unsigned int min, max;
1070 
1071 	/*
1072 	 * Start the search for the minimum tap value at 10, as smaller values
1073 	 * may wrongly be reported as working but fail at higher speeds,
1074 	 * according to the TRM.
1075 	 */
1076 	min = 10;
1077 	while (min < 255) {
1078 		tegra_sdhci_set_tap(host, min);
1079 		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1080 			break;
1081 		min++;
1082 	}
1083 
1084 	/* Find the maximum tap value that still passes. */
1085 	max = min + 1;
1086 	while (max < 255) {
1087 		tegra_sdhci_set_tap(host, max);
1088 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1089 			max--;
1090 			break;
1091 		}
1092 		max++;
1093 	}
1094 
1095 	/* The TRM states the ideal tap value is at 75% in the passing range. */
1096 	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1097 
1098 	return mmc_send_tuning(host->mmc, opcode, NULL);
1099 }
1100 
1101 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1102 						   struct mmc_ios *ios)
1103 {
1104 	struct sdhci_host *host = mmc_priv(mmc);
1105 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1106 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1107 	int ret = 0;
1108 
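	/*
	 * Ordering matters: for 3.3 V the pads are switched before the
	 * regulator, for 1.8 V the regulator is switched before the pads.
	 */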
1109 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1110 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1111 		if (ret < 0)
1112 			return ret;
1113 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1114 	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1115 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1116 		if (ret < 0)
1117 			return ret;
1118 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1119 	}
1120 
1121 	if (tegra_host->pad_calib_required)
1122 		tegra_sdhci_pad_autocalib(host);
1123 
1124 	return ret;
1125 }
1126 
1127 static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1128 					 struct sdhci_tegra *tegra_host)
1129 {
1130 	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1131 	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1132 		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1133 			PTR_ERR(tegra_host->pinctrl_sdmmc));
1134 		return -1;
1135 	}
1136 
1137 	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1138 				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1139 	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1140 		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1141 			tegra_host->pinctrl_state_1v8_drv = NULL;
1142 	}
1143 
1144 	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1145 				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1146 	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1147 		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1148 			tegra_host->pinctrl_state_3v3_drv = NULL;
1149 	}
1150 
1151 	tegra_host->pinctrl_state_3v3 =
1152 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1153 	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1154 		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1155 			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1156 		return -1;
1157 	}
1158 
1159 	tegra_host->pinctrl_state_1v8 =
1160 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1161 	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1162 		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1163 			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1164 		return -1;
1165 	}
1166 
1167 	tegra_host->pad_control_available = true;
1168 
1169 	return 0;
1170 }
1171 
1172 static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1173 {
1174 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1175 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1176 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1177 
1178 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1179 		tegra_host->pad_calib_required = true;
1180 }
1181 
1182 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1183 {
1184 	struct mmc_host *mmc = cq_host->mmc;
1185 	struct sdhci_host *host = mmc_priv(mmc);
1186 	u8 ctrl;
1187 	ktime_t timeout;
1188 	bool timed_out;
1189 
1190 	/*
1191 	 * During CQE resume/unhalt, the CQHCI driver unhalts CQE before the
1192 	 * cqhci_host_ops enable callback, in which the SDHCI DMA and BLOCK_SIZE
1193 	 * registers need to be reconfigured.
1194 	 * Tegra CQHCI/SDHCI prevents write access to the block size register
1195 	 * while CQE is unhalted, so handle the CQE resume sequence here and
1196 	 * configure the SDHCI block registers before exiting the CQE halt state.
1197 	 */
1198 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1199 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1200 		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1201 		sdhci_cqe_enable(mmc);
1202 		writel(val, cq_host->mmio + reg);
1203 		timeout = ktime_add_us(ktime_get(), 50);
1204 		while (1) {
1205 			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1206 			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1207 			if (!(ctrl & CQHCI_HALT) || timed_out)
1208 				break;
1209 		}
1210 		/*
1211 		 * CQE usually resumes very quickly, but in case the Tegra CQE
1212 		 * doesn't resume, retry the unhalt.
1213 		 */
1214 		if (timed_out)
1215 			writel(val, cq_host->mmio + reg);
1216 	} else {
1217 		writel(val, cq_host->mmio + reg);
1218 	}
1219 }
1220 
1221 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1222 					 struct mmc_request *mrq, u64 *data)
1223 {
1224 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1225 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1226 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1227 
1228 	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1229 	    mrq->cmd->flags & MMC_RSP_R1B)
1230 		*data |= CQHCI_CMD_TIMING(1);
1231 }
1232 
1233 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1234 {
1235 	struct cqhci_host *cq_host = mmc->cqe_private;
1236 	struct sdhci_host *host = mmc_priv(mmc);
1237 	u32 val;
1238 
1239 	/*
1240 	 * The Tegra CQHCI/SDMMC design prevents write access to the SDHCI block
1241 	 * size register when CQE is enabled and unhalted. The CQHCI driver
1242 	 * enables CQE prior to activation, so disable CQE before programming
1243 	 * the block size in the SDHCI controller and then re-enable it.
1244 	 */
1245 	if (!cq_host->activated) {
1246 		val = cqhci_readl(cq_host, CQHCI_CFG);
1247 		if (val & CQHCI_ENABLE)
1248 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1249 				     CQHCI_CFG);
1250 		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1251 		sdhci_cqe_enable(mmc);
1252 		if (val & CQHCI_ENABLE)
1253 			cqhci_writel(cq_host, val, CQHCI_CFG);
1254 	}
1255 
1256 	/*
1257 	 * CMD CRC errors are sometimes seen with some eMMC devices when the
1258 	 * status command is sent during transfer of the last data block, which
1259 	 * is the default case as the send status command block counter (CBC) is 1.
1260 	 * The recommended fix is to set CBC to 0, allowing the status command
1261 	 * only when the data lines are idle.
1262 	 */
1263 	val = cqhci_readl(cq_host, CQHCI_SSC1);
1264 	val &= ~CQHCI_SSC1_CBC_MASK;
1265 	cqhci_writel(cq_host, val, CQHCI_SSC1);
1266 }
1267 
1268 static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1269 {
1270 	sdhci_dumpregs(mmc_priv(mmc));
1271 }
1272 
1273 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1274 {
1275 	int cmd_error = 0;
1276 	int data_error = 0;
1277 
1278 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1279 		return intmask;
1280 
1281 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1282 
1283 	return 0;
1284 }
1285 
1286 static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1287 				    struct mmc_command *cmd)
1288 {
1289 	u32 val;
1290 
1291 	/*
1292 	 * The HW busy detection timeout is based on the programmed data timeout
1293 	 * counter and the maximum supported timeout is 11 s, which may not be
1294 	 * enough for long operations like cache flush, sleep awake and erase.
1295 	 *
1296 	 * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows
1297 	 * the host controller to wait for the busy state for as long as the
1298 	 * card is busy, without a HW timeout.
1299 	 *
1300 	 * So, use the infinite busy wait mode for operations that may take
1301 	 * more than the maximum HW busy timeout of 11 s; otherwise use the
1302 	 * finite busy wait mode.
1303 	 */
1304 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1305 	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1306 		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1307 	else
1308 		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1309 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1310 
1311 	__sdhci_set_timeout(host, cmd);
1312 }
1313 
1314 static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1315 {
1316 	struct cqhci_host *cq_host = mmc->cqe_private;
1317 	u32 reg;
1318 
1319 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1320 	reg |= CQHCI_ENABLE;
1321 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1322 }
1323 
1324 static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1325 {
1326 	struct cqhci_host *cq_host = mmc->cqe_private;
1327 	struct sdhci_host *host = mmc_priv(mmc);
1328 	u32 reg;
1329 
1330 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1331 	reg &= ~CQHCI_ENABLE;
1332 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1333 	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1334 }
1335 
1336 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1337 	.write_l    = tegra_cqhci_writel,
1338 	.enable	= sdhci_tegra_cqe_enable,
1339 	.disable = sdhci_cqe_disable,
1340 	.dumpregs = sdhci_tegra_dumpregs,
1341 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1342 	.pre_enable = sdhci_tegra_cqe_pre_enable,
1343 	.post_disable = sdhci_tegra_cqe_post_disable,
1344 };
1345 
1346 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1347 {
1348 	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1349 	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1350 	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1351 	struct device *dev = mmc_dev(host->mmc);
1352 
1353 	if (soc->dma_mask)
1354 		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1355 
1356 	return 0;
1357 }
1358 
1359 static const struct sdhci_ops tegra_sdhci_ops = {
1360 	.get_ro     = tegra_sdhci_get_ro,
1361 	.read_w     = tegra_sdhci_readw,
1362 	.write_l    = tegra_sdhci_writel,
1363 	.set_clock  = tegra_sdhci_set_clock,
1364 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1365 	.set_bus_width = sdhci_set_bus_width,
1366 	.reset      = tegra_sdhci_reset,
1367 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1368 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1369 	.voltage_switch = tegra_sdhci_voltage_switch,
1370 	.get_max_clock = tegra_sdhci_get_max_clock,
1371 };
1372 
1373 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1374 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1375 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1376 		  SDHCI_QUIRK_NO_HISPD_BIT |
1377 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1378 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1379 	.ops  = &tegra_sdhci_ops,
1380 };
1381 
1382 static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1383 	.pdata = &sdhci_tegra20_pdata,
1384 	.dma_mask = DMA_BIT_MASK(32),
1385 	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1386 		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1387 		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1388 };
1389 
1390 static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1391 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1392 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1393 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1394 		  SDHCI_QUIRK_NO_HISPD_BIT |
1395 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1396 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1397 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1398 		   SDHCI_QUIRK2_BROKEN_HS200 |
1399 		   /*
1400 		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1401 		    * though no command operation was in progress."
1402 		    *
1403 		    * The exact reason is unknown, as the same hardware seems
1404 		    * to support Auto CMD23 on a downstream 3.1 kernel.
1405 		    */
1406 		   SDHCI_QUIRK2_ACMD23_BROKEN,
1407 	.ops  = &tegra_sdhci_ops,
1408 };
1409 
1410 static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1411 	.pdata = &sdhci_tegra30_pdata,
1412 	.dma_mask = DMA_BIT_MASK(32),
1413 	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1414 		    NVQUIRK_ENABLE_SDR50 |
1415 		    NVQUIRK_ENABLE_SDR104 |
1416 		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
1417 		    NVQUIRK_HAS_PADCALIB,
1418 };
1419 
1420 static const struct sdhci_ops tegra114_sdhci_ops = {
1421 	.get_ro     = tegra_sdhci_get_ro,
1422 	.read_w     = tegra_sdhci_readw,
1423 	.write_w    = tegra_sdhci_writew,
1424 	.write_l    = tegra_sdhci_writel,
1425 	.set_clock  = tegra_sdhci_set_clock,
1426 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1427 	.set_bus_width = sdhci_set_bus_width,
1428 	.reset      = tegra_sdhci_reset,
1429 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1430 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1431 	.voltage_switch = tegra_sdhci_voltage_switch,
1432 	.get_max_clock = tegra_sdhci_get_max_clock,
1433 };
1434 
1435 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1436 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1437 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1438 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1439 		  SDHCI_QUIRK_NO_HISPD_BIT |
1440 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1441 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1442 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1443 	.ops  = &tegra114_sdhci_ops,
1444 };
1445 
1446 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1447 	.pdata = &sdhci_tegra114_pdata,
1448 	.dma_mask = DMA_BIT_MASK(32),
1449 	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1450 };
1451 
1452 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1453 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1454 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1455 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1456 		  SDHCI_QUIRK_NO_HISPD_BIT |
1457 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1458 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1459 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1460 	.ops  = &tegra114_sdhci_ops,
1461 };
1462 
1463 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1464 	.pdata = &sdhci_tegra124_pdata,
1465 	.dma_mask = DMA_BIT_MASK(34),
1466 	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
1467 };
1468 
1469 static const struct sdhci_ops tegra210_sdhci_ops = {
1470 	.get_ro     = tegra_sdhci_get_ro,
1471 	.read_w     = tegra_sdhci_readw,
1472 	.write_w    = tegra210_sdhci_writew,
1473 	.write_l    = tegra_sdhci_writel,
1474 	.set_clock  = tegra_sdhci_set_clock,
1475 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1476 	.set_bus_width = sdhci_set_bus_width,
1477 	.reset      = tegra_sdhci_reset,
1478 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1479 	.voltage_switch = tegra_sdhci_voltage_switch,
1480 	.get_max_clock = tegra_sdhci_get_max_clock,
1481 	.set_timeout = tegra_sdhci_set_timeout,
1482 };
1483 
1484 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1485 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1486 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1487 		  SDHCI_QUIRK_NO_HISPD_BIT |
1488 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1489 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1490 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1491 	.ops  = &tegra210_sdhci_ops,
1492 };
1493 
1494 static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1495 	.pdata = &sdhci_tegra210_pdata,
1496 	.dma_mask = DMA_BIT_MASK(34),
1497 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1498 		    NVQUIRK_HAS_PADCALIB |
1499 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1500 		    NVQUIRK_ENABLE_SDR50 |
1501 		    NVQUIRK_ENABLE_SDR104 |
1502 		    NVQUIRK_HAS_TMCLK,
1503 	.min_tap_delay = 106,
1504 	.max_tap_delay = 185,
1505 };
1506 
1507 static const struct sdhci_ops tegra186_sdhci_ops = {
1508 	.get_ro     = tegra_sdhci_get_ro,
1509 	.read_w     = tegra_sdhci_readw,
1510 	.write_l    = tegra_sdhci_writel,
1511 	.set_clock  = tegra_sdhci_set_clock,
1512 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1513 	.set_bus_width = sdhci_set_bus_width,
1514 	.reset      = tegra_sdhci_reset,
1515 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1516 	.voltage_switch = tegra_sdhci_voltage_switch,
1517 	.get_max_clock = tegra_sdhci_get_max_clock,
1518 	.irq = sdhci_tegra_cqhci_irq,
1519 	.set_timeout = tegra_sdhci_set_timeout,
1520 };
1521 
1522 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1523 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1524 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1525 		  SDHCI_QUIRK_NO_HISPD_BIT |
1526 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1527 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1528 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1529 	.ops  = &tegra186_sdhci_ops,
1530 };
1531 
1532 static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1533 	.pdata = &sdhci_tegra186_pdata,
1534 	.dma_mask = DMA_BIT_MASK(40),
1535 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1536 		    NVQUIRK_HAS_PADCALIB |
1537 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1538 		    NVQUIRK_ENABLE_SDR50 |
1539 		    NVQUIRK_ENABLE_SDR104 |
1540 		    NVQUIRK_HAS_TMCLK |
1541 		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1542 	.min_tap_delay = 84,
1543 	.max_tap_delay = 136,
1544 };
1545 
1546 static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1547 	.pdata = &sdhci_tegra186_pdata,
1548 	.dma_mask = DMA_BIT_MASK(39),
1549 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1550 		    NVQUIRK_HAS_PADCALIB |
1551 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1552 		    NVQUIRK_ENABLE_SDR50 |
1553 		    NVQUIRK_ENABLE_SDR104 |
1554 		    NVQUIRK_HAS_TMCLK,
1555 	.min_tap_delay = 96,
1556 	.max_tap_delay = 139,
1557 };
1558 
1559 static const struct of_device_id sdhci_tegra_dt_match[] = {
1560 	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1561 	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1562 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1563 	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1564 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1565 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1566 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1567 	{}
1568 };
1569 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1570 
1571 static int sdhci_tegra_add_host(struct sdhci_host *host)
1572 {
1573 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1574 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1575 	struct cqhci_host *cq_host;
1576 	bool dma64;
1577 	int ret;
1578 
1579 	if (!tegra_host->enable_hwcq)
1580 		return sdhci_add_host(host);
1581 
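	/* Hardware command queueing: enable host v4 mode and set up CQHCI */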
1582 	sdhci_enable_v4_mode(host);
1583 
1584 	ret = sdhci_setup_host(host);
1585 	if (ret)
1586 		return ret;
1587 
1588 	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1589 
1590 	cq_host = devm_kzalloc(mmc_dev(host->mmc),
1591 				sizeof(*cq_host), GFP_KERNEL);
1592 	if (!cq_host) {
1593 		ret = -ENOMEM;
1594 		goto cleanup;
1595 	}
1596 
1597 	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1598 	cq_host->ops = &sdhci_tegra_cqhci_ops;
1599 
1600 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1601 	if (dma64)
1602 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1603 
1604 	ret = cqhci_init(cq_host, host->mmc, dma64);
1605 	if (ret)
1606 		goto cleanup;
1607 
1608 	ret = __sdhci_add_host(host);
1609 	if (ret)
1610 		goto cleanup;
1611 
1612 	return 0;
1613 
1614 cleanup:
1615 	sdhci_cleanup_host(host);
1616 	return ret;
1617 }
1618 
1619 static int sdhci_tegra_probe(struct platform_device *pdev)
1620 {
1621 	const struct sdhci_tegra_soc_data *soc_data;
1622 	struct sdhci_host *host;
1623 	struct sdhci_pltfm_host *pltfm_host;
1624 	struct sdhci_tegra *tegra_host;
1625 	struct clk *clk;
1626 	int rc;
1627 
1628 	soc_data = of_device_get_match_data(&pdev->dev);
1629 	if (!soc_data)
1630 		return -EINVAL;
1631 
1632 	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1633 	if (IS_ERR(host))
1634 		return PTR_ERR(host);
1635 	pltfm_host = sdhci_priv(host);
1636 
1637 	tegra_host = sdhci_pltfm_priv(pltfm_host);
1638 	tegra_host->ddr_signaling = false;
1639 	tegra_host->pad_calib_required = false;
1640 	tegra_host->pad_control_available = false;
1641 	tegra_host->soc_data = soc_data;
1642 
1643 	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
1644 		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
1645 
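	/*
	 * Only install the Tegra-specific voltage switch handler when the
	 * pad control pinctrl states were set up successfully.
	 */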
1646 	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1647 		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1648 		if (rc == 0)
1649 			host->mmc_host_ops.start_signal_voltage_switch =
1650 				sdhci_tegra_start_signal_voltage_switch;
1651 	}
1652 
1653 	/* Hook to periodically rerun pad calibration */
1654 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1655 		host->mmc_host_ops.request = tegra_sdhci_request;
1656 
1657 	host->mmc_host_ops.hs400_enhanced_strobe =
1658 			tegra_sdhci_hs400_enhanced_strobe;
1659 
1660 	if (!host->ops->platform_execute_tuning)
1661 		host->mmc_host_ops.execute_tuning =
1662 				tegra_sdhci_execute_hw_tuning;
1663 
1664 	rc = mmc_of_parse(host->mmc);
1665 	if (rc)
1666 		goto err_parse_dt;
1667 
1668 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1669 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1670 
1671 	/* HW busy detection is supported, but R1B responses are required. */
1672 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1673 
1674 	/* GPIO CD can be set as a wakeup source */
1675 	host->mmc->caps |= MMC_CAP_CD_WAKE;
1676 
1677 	tegra_sdhci_parse_dt(host);
1678 
1679 	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1680 							 GPIOD_OUT_HIGH);
1681 	if (IS_ERR(tegra_host->power_gpio)) {
1682 		rc = PTR_ERR(tegra_host->power_gpio);
1683 		goto err_power_req;
1684 	}
1685 
1686 	/*
1687 	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for the host
1688 	 * timeout clock; SW can choose TMCLK or SDCLK for the hardware data
1689 	 * timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit of the
1690 	 * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
1691 	 *
1692 	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
1693 	 * 12 MHz TMCLK advertised in the host capability register. A 12 MHz
1694 	 * TMCLK yields a maximum data timeout of about 11 s (2^27 timeout
1695 	 * clock cycles), better than using SDCLK for the data timeout.
1696 	 *
1697 	 * So TMCLK is set to 12 MHz and kept enabled at all times on SoCs
1698 	 * that support a separate TMCLK.
1699 	 */
1700 
1701 	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1702 		clk = devm_clk_get(&pdev->dev, "tmclk");
1703 		if (IS_ERR(clk)) {
1704 			rc = PTR_ERR(clk);
1705 			if (rc == -EPROBE_DEFER)
1706 				goto err_power_req;
1707 
1708 			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1709 			clk = NULL;
1710 		}
1711 
1712 		clk_set_rate(clk, 12000000);
1713 		rc = clk_prepare_enable(clk);
1714 		if (rc) {
1715 			dev_err(&pdev->dev,
1716 				"failed to enable tmclk: %d\n", rc);
1717 			goto err_power_req;
1718 		}
1719 
1720 		tegra_host->tmclk = clk;
1721 	}
1722 
1723 	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1724 	if (IS_ERR(clk)) {
1725 		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1726 				   "failed to get clock\n");
1727 		goto err_clk_get;
1728 	}
1729 	pltfm_host->clk = clk;
1730 
1731 	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1732 							   "sdhci");
1733 	if (IS_ERR(tegra_host->rst)) {
1734 		rc = PTR_ERR(tegra_host->rst);
1735 		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1736 		goto err_rst_get;
1737 	}
1738 
1739 	rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1740 	if (rc)
1741 		goto err_rst_get;
1742 
1743 	pm_runtime_enable(&pdev->dev);
1744 	rc = pm_runtime_resume_and_get(&pdev->dev);
1745 	if (rc)
1746 		goto err_pm_get;
1747 
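	/*
	 * Hard-reset the controller, allowing a few milliseconds for the
	 * hardware to settle on either side of the reset, before the host
	 * is registered.
	 */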
1748 	rc = reset_control_assert(tegra_host->rst);
1749 	if (rc)
1750 		goto err_rst_assert;
1751 
1752 	usleep_range(2000, 4000);
1753 
1754 	rc = reset_control_deassert(tegra_host->rst);
1755 	if (rc)
1756 		goto err_rst_assert;
1757 
1758 	usleep_range(2000, 4000);
1759 
1760 	rc = sdhci_tegra_add_host(host);
1761 	if (rc)
1762 		goto err_add_host;
1763 
1764 	return 0;
1765 
1766 err_add_host:
1767 	reset_control_assert(tegra_host->rst);
1768 err_rst_assert:
1769 	pm_runtime_put_sync_suspend(&pdev->dev);
1770 err_pm_get:
1771 	pm_runtime_disable(&pdev->dev);
1772 err_rst_get:
1773 err_clk_get:
1774 	clk_disable_unprepare(tegra_host->tmclk);
1775 err_power_req:
1776 err_parse_dt:
1777 	sdhci_pltfm_free(pdev);
1778 	return rc;
1779 }
1780 
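/*
 * Tear down in roughly the reverse order of probe: unregister the host,
 * assert the controller reset, drop the runtime PM references and disable
 * the timeout clock.
 */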
1781 static int sdhci_tegra_remove(struct platform_device *pdev)
1782 {
1783 	struct sdhci_host *host = platform_get_drvdata(pdev);
1784 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1785 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1786 
1787 	sdhci_remove_host(host, 0);
1788 
1789 	reset_control_assert(tegra_host->rst);
1790 	usleep_range(2000, 4000);
1791 
1792 	pm_runtime_put_sync_suspend(&pdev->dev);
1793 	pm_runtime_force_suspend(&pdev->dev);
1794 
1795 	clk_disable_unprepare(tegra_host->tmclk);
1796 	sdhci_pltfm_free(pdev);
1797 
1798 	return 0;
1799 }
1800 
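/* Runtime PM only gates and ungates the SDHCI module clock. */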
1801 static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
1802 {
1803 	struct sdhci_host *host = dev_get_drvdata(dev);
1804 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1805 
1806 	clk_disable_unprepare(pltfm_host->clk);
1807 
1808 	return 0;
1809 }
1810 
1811 static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
1812 {
1813 	struct sdhci_host *host = dev_get_drvdata(dev);
1814 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1815 
1816 	return clk_prepare_enable(pltfm_host->clk);
1817 }
1818 
1819 #ifdef CONFIG_PM_SLEEP
1820 static int sdhci_tegra_suspend(struct device *dev)
1821 {
1822 	struct sdhci_host *host = dev_get_drvdata(dev);
1823 	int ret;
1824 
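	/* Quiesce the command queue engine before suspending the SDHCI host. */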
1825 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1826 		ret = cqhci_suspend(host->mmc);
1827 		if (ret)
1828 			return ret;
1829 	}
1830 
1831 	ret = sdhci_suspend_host(host);
1832 	if (ret) {
1833 		cqhci_resume(host->mmc);
1834 		return ret;
1835 	}
1836 
1837 	ret = pm_runtime_force_suspend(dev);
1838 	if (ret) {
1839 		sdhci_resume_host(host);
1840 		cqhci_resume(host->mmc);
1841 		return ret;
1842 	}
1843 
1844 	return mmc_gpio_set_cd_wake(host->mmc, true);
1845 }
1846 
1847 static int sdhci_tegra_resume(struct device *dev)
1848 {
1849 	struct sdhci_host *host = dev_get_drvdata(dev);
1850 	int ret;
1851 
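	/*
	 * Resume in the reverse order of suspend: card-detect wake, runtime
	 * PM, the SDHCI host and finally the command queue engine.
	 */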
1852 	ret = mmc_gpio_set_cd_wake(host->mmc, false);
1853 	if (ret)
1854 		return ret;
1855 
1856 	ret = pm_runtime_force_resume(dev);
1857 	if (ret)
1858 		return ret;
1859 
1860 	ret = sdhci_resume_host(host);
1861 	if (ret)
1862 		goto disable_clk;
1863 
1864 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1865 		ret = cqhci_resume(host->mmc);
1866 		if (ret)
1867 			goto suspend_host;
1868 	}
1869 
1870 	return 0;
1871 
1872 suspend_host:
1873 	sdhci_suspend_host(host);
1874 disable_clk:
1875 	pm_runtime_force_suspend(dev);
1876 	return ret;
1877 }
1878 #endif
1879 
1880 static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
1881 	SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
1882 			   NULL)
1883 	SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
1884 };
1885 
1886 static struct platform_driver sdhci_tegra_driver = {
1887 	.driver		= {
1888 		.name	= "sdhci-tegra",
1889 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1890 		.of_match_table = sdhci_tegra_dt_match,
1891 		.pm	= &sdhci_tegra_dev_pm_ops,
1892 	},
1893 	.probe		= sdhci_tegra_probe,
1894 	.remove		= sdhci_tegra_remove,
1895 };
1896 
1897 module_platform_driver(sdhci_tegra_driver);
1898 
1899 MODULE_DESCRIPTION("SDHCI driver for Tegra");
1900 MODULE_AUTHOR("Google, Inc.");
1901 MODULE_LICENSE("GPL v2");
1902