xref: /openbmc/linux/drivers/mmc/host/sdhci-tegra.c (revision 0b26ca68)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2010 Google, Inc.
4  */
5 
6 #include <linux/delay.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/err.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_device.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/regulator/consumer.h>
19 #include <linux/reset.h>
20 #include <linux/mmc/card.h>
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/mmc.h>
23 #include <linux/mmc/slot-gpio.h>
24 #include <linux/gpio/consumer.h>
25 #include <linux/ktime.h>
26 
27 #include "sdhci-pltfm.h"
28 #include "cqhci.h"
29 
30 /* Tegra SDHOST controller vendor register definitions */
31 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
32 #define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
33 #define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
34 #define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
35 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
36 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
37 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
38 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
39 
40 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
41 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
42 
43 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
44 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
45 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
46 
47 #define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
48 #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
49 #define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
50 #define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
51 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
52 #define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
53 
54 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
55 #define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
56 
57 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
58 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
59 
60 #define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
61 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
62 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
63 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
64 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
65 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
66 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
67 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
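/* TUN_ITER field values selecting 128 or 256 hardware tuning iterations */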
68 #define TRIES_128					2
69 #define TRIES_256					4
70 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
71 
72 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
73 #define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
74 #define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
75 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
76 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
77 #define TUNING_WORD_BIT_SIZE				32
78 
79 #define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
80 #define SDHCI_AUTO_CAL_START				BIT(31)
81 #define SDHCI_AUTO_CAL_ENABLE				BIT(29)
82 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
83 
84 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
85 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
86 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
87 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
88 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
89 
90 #define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
91 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
92 
93 #define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
94 #define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
95 #define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
96 #define NVQUIRK_ENABLE_SDR50				BIT(3)
97 #define NVQUIRK_ENABLE_SDR104				BIT(4)
98 #define NVQUIRK_ENABLE_DDR50				BIT(5)
99 /*
100  * HAS_PADCALIB NVQUIRK is for SoCs that support automatic calibration of
101  * the pad drive strength.
102  */
103 #define NVQUIRK_HAS_PADCALIB				BIT(6)
104 /*
105  * NEEDS_PAD_CONTROL NVQUIRK is for SoCs that have separate 3V3 and 1V8 pads.
106  * 3V3/1V8 pad selection happens through pinctrl state selection depending
107  * on the signaling mode.
108  */
109 #define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
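/* Gate the card clock while the tap value is being changed (see tegra_sdhci_set_tap()) */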
110 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
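/* Use CMD_TIMING = 1 in the CQE DCMD descriptor for R1b commands */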
111 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
112 
113 /*
114  * NVQUIRK_HAS_TMCLK is for SoCs that have a separate timeout clock for the
115  * Tegra SDMMC hardware data timeout.
116  */
117 #define NVQUIRK_HAS_TMCLK				BIT(10)
118 
119 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
120 #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
121 
122 struct sdhci_tegra_soc_data {
123 	const struct sdhci_pltfm_data *pdata;
124 	u64 dma_mask;
125 	u32 nvquirks;
126 	u8 min_tap_delay;
127 	u8 max_tap_delay;
128 };
129 
130 /* Magic pull up and pull down pad calibration offsets */
131 struct sdhci_tegra_autocal_offsets {
132 	u32 pull_up_3v3;
133 	u32 pull_down_3v3;
134 	u32 pull_up_3v3_timeout;
135 	u32 pull_down_3v3_timeout;
136 	u32 pull_up_1v8;
137 	u32 pull_down_1v8;
138 	u32 pull_up_1v8_timeout;
139 	u32 pull_down_1v8_timeout;
140 	u32 pull_up_sdr104;
141 	u32 pull_down_sdr104;
142 	u32 pull_up_hs400;
143 	u32 pull_down_hs400;
144 };
145 
146 struct sdhci_tegra {
147 	const struct sdhci_tegra_soc_data *soc_data;
148 	struct gpio_desc *power_gpio;
149 	struct clk *tmclk;
150 	bool ddr_signaling;
151 	bool pad_calib_required;
152 	bool pad_control_available;
153 
154 	struct reset_control *rst;
155 	struct pinctrl *pinctrl_sdmmc;
156 	struct pinctrl_state *pinctrl_state_3v3;
157 	struct pinctrl_state *pinctrl_state_1v8;
158 	struct pinctrl_state *pinctrl_state_3v3_drv;
159 	struct pinctrl_state *pinctrl_state_1v8_drv;
160 
161 	struct sdhci_tegra_autocal_offsets autocal_offsets;
162 	ktime_t last_calib;
163 
164 	u32 default_tap;
165 	u32 default_trim;
166 	u32 dqs_trim;
167 	bool enable_hwcq;
168 	unsigned long curr_clk_rate;
169 	u8 tuned_tap_delay;
170 };
171 
172 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
173 {
174 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
175 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
176 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
177 
178 	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
179 			(reg == SDHCI_HOST_VERSION))) {
180 		/* Erratum: Version register is invalid in HW. */
181 		return SDHCI_SPEC_200;
182 	}
183 
184 	return readw(host->ioaddr + reg);
185 }
186 
187 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
188 {
189 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
190 
191 	switch (reg) {
192 	case SDHCI_TRANSFER_MODE:
193 		/*
194 		 * Postpone this write; it must be issued together with the
195 		 * command write handled below.
196 		 */
197 		pltfm_host->xfer_mode_shadow = val;
198 		return;
199 	case SDHCI_COMMAND:
200 		writel((val << 16) | pltfm_host->xfer_mode_shadow,
201 			host->ioaddr + SDHCI_TRANSFER_MODE);
202 		return;
203 	}
204 
205 	writew(val, host->ioaddr + reg);
206 }
207 
208 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
209 {
210 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
211 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
212 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
213 
214 	/* We seem to get spurious timeout and CRC errors, so disable
215 	 * signalling of them. In case of real errors, software timers
216 	 * should take care of eventually detecting them.
217 	 */
218 	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
219 		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
220 
221 	writel(val, host->ioaddr + reg);
222 
223 	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
224 			(reg == SDHCI_INT_ENABLE))) {
225 		/* Erratum: Must enable block gap interrupt detection */
226 		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
227 		if (val & SDHCI_INT_CARD_INT)
228 			gap_ctrl |= 0x8;
229 		else
230 			gap_ctrl &= ~0x8;
231 		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
232 	}
233 }
234 
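/*
 * Gate or ungate the card clock and return its previous state so that the
 * caller can restore it afterwards.
 */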
235 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
236 {
237 	bool status;
238 	u32 reg;
239 
240 	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
241 	status = !!(reg & SDHCI_CLOCK_CARD_EN);
242 
243 	if (status == enable)
244 		return status;
245 
246 	if (enable)
247 		reg |= SDHCI_CLOCK_CARD_EN;
248 	else
249 		reg &= ~SDHCI_CLOCK_CARD_EN;
250 
251 	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
252 
253 	return status;
254 }
255 
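/*
 * Tegra210-specific command write: for tuning commands, gate the card clock
 * around the command write and reset the CMD/DATA lines before the clock is
 * restored.
 */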
256 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
257 {
258 	bool is_tuning_cmd = false;
259 	bool clk_enabled;
260 	u8 cmd;
261 
262 	if (reg == SDHCI_COMMAND) {
263 		cmd = SDHCI_GET_CMD(val);
264 		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
265 				cmd == MMC_SEND_TUNING_BLOCK_HS200;
266 	}
267 
268 	if (is_tuning_cmd)
269 		clk_enabled = tegra_sdhci_configure_card_clk(host, false);
270 
271 	writew(val, host->ioaddr + reg);
272 
273 	if (is_tuning_cmd) {
274 		udelay(1);
275 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
276 		tegra_sdhci_configure_card_clk(host, clk_enabled);
277 	}
278 }
279 
280 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
281 {
282 	/*
283 	 * Write-enable shall be assumed if GPIO is missing in a board's
284 	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
285 	 * Tegra.
286 	 */
287 	return mmc_gpio_get_ro(host->mmc);
288 }
289 
290 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
291 {
292 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
293 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
294 	int has_1v8, has_3v3;
295 
296 	/*
297 	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
298 	 * voltage configuration in order to perform voltage switching. This
299 	 * means that valid pinctrl info is required on SDHCI instances capable
300 	 * of performing voltage switching. Whether or not an SDHCI instance is
301 	 * capable of voltage switching is determined based on the regulator.
302 	 */
303 
304 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
305 		return true;
306 
307 	if (IS_ERR(host->mmc->supply.vqmmc))
308 		return false;
309 
310 	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
311 						 1700000, 1950000);
312 
313 	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
314 						 2700000, 3600000);
315 
316 	if (has_1v8 == 1 && has_3v3 == 1)
317 		return tegra_host->pad_control_available;
318 
319 	/* Fixed voltage, no pad control required. */
320 	return true;
321 }
322 
323 static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
324 {
325 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
326 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
327 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
328 	bool card_clk_enabled = false;
329 	u32 reg;
330 
331 	/*
332 	 * Touching the tap values is a bit tricky on some SoC generations.
333 	 * The quirk enables a workaround for a glitch that sometimes occurs if
334 	 * the tap values are changed.
335 	 */
336 
337 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
338 		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
339 
340 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
341 	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
342 	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
343 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
344 
345 	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
346 	    card_clk_enabled) {
347 		udelay(1);
348 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
349 		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
350 	}
351 }
352 
353 static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
354 					      struct mmc_ios *ios)
355 {
356 	struct sdhci_host *host = mmc_priv(mmc);
357 	u32 val;
358 
359 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
360 
361 	if (ios->enhanced_strobe)
362 		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
363 	else
364 		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
365 
366 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
367 
368 }
369 
370 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
371 {
372 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
373 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
374 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
375 	u32 misc_ctrl, clk_ctrl, pad_ctrl;
376 
377 	sdhci_reset(host, mask);
378 
379 	if (!(mask & SDHCI_RESET_ALL))
380 		return;
381 
382 	tegra_sdhci_set_tap(host, tegra_host->default_tap);
383 
384 	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
385 	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
386 
387 	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
388 		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
389 		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
390 		       SDHCI_MISC_CTRL_ENABLE_SDR104);
391 
392 	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
393 		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
394 
395 	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
396 		/* Erratum: Enable SDHCI spec v3.00 support */
397 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
398 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
399 		/* Advertise UHS modes as supported by host */
400 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
401 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
402 		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
403 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
404 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
405 			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
406 		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
407 			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
408 	}
409 
410 	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
411 
412 	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
413 	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
414 
415 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
416 		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
417 		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
418 		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
419 		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
420 
421 		tegra_host->pad_calib_required = true;
422 	}
423 
424 	tegra_host->ddr_signaling = false;
425 }
426 
427 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
428 {
429 	u32 val;
430 
431 	/*
432 	 * Enable or disable the additional I/O pad used by the drive strength
433 	 * calibration process.
434 	 */
435 	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
436 
437 	if (enable)
438 		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
439 	else
440 		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
441 
442 	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
443 
444 	if (enable)
445 		usleep_range(1, 2);
446 }
447 
448 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
449 					       u16 pdpu)
450 {
451 	u32 reg;
452 
453 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
454 	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
455 	reg |= pdpu;
456 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
457 }
458 
459 static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
460 				   bool state_drvupdn)
461 {
462 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
463 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
464 	struct sdhci_tegra_autocal_offsets *offsets =
465 						&tegra_host->autocal_offsets;
466 	struct pinctrl_state *pinctrl_drvupdn = NULL;
467 	int ret = 0;
468 	u8 drvup = 0, drvdn = 0;
469 	u32 reg;
470 
471 	if (!state_drvupdn) {
472 		/* PADS Drive Strength */
473 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
474 			if (tegra_host->pinctrl_state_1v8_drv) {
475 				pinctrl_drvupdn =
476 					tegra_host->pinctrl_state_1v8_drv;
477 			} else {
478 				drvup = offsets->pull_up_1v8_timeout;
479 				drvdn = offsets->pull_down_1v8_timeout;
480 			}
481 		} else {
482 			if (tegra_host->pinctrl_state_3v3_drv) {
483 				pinctrl_drvupdn =
484 					tegra_host->pinctrl_state_3v3_drv;
485 			} else {
486 				drvup = offsets->pull_up_3v3_timeout;
487 				drvdn = offsets->pull_down_3v3_timeout;
488 			}
489 		}
490 
491 		if (pinctrl_drvupdn != NULL) {
492 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
493 							pinctrl_drvupdn);
494 			if (ret < 0)
495 				dev_err(mmc_dev(host->mmc),
496 					"failed pads drvupdn, ret: %d\n", ret);
497 		} else if ((drvup) || (drvdn)) {
498 			reg = sdhci_readl(host,
499 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
500 			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
501 			reg |= (drvup << 20) | (drvdn << 12);
502 			sdhci_writel(host, reg,
503 					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
504 		}
505 
506 	} else {
507 		/* Dual Voltage PADS Voltage selection */
508 		if (!tegra_host->pad_control_available)
509 			return 0;
510 
511 		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
512 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
513 						tegra_host->pinctrl_state_1v8);
514 			if (ret < 0)
515 				dev_err(mmc_dev(host->mmc),
516 					"setting 1.8V failed, ret: %d\n", ret);
517 		} else {
518 			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
519 						tegra_host->pinctrl_state_3v3);
520 			if (ret < 0)
521 				dev_err(mmc_dev(host->mmc),
522 					"setting 3.3V failed, ret: %d\n", ret);
523 		}
524 	}
525 
526 	return ret;
527 }
528 
529 static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
530 {
531 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
532 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
533 	struct sdhci_tegra_autocal_offsets offsets =
534 			tegra_host->autocal_offsets;
535 	struct mmc_ios *ios = &host->mmc->ios;
536 	bool card_clk_enabled;
537 	u16 pdpu;
538 	u32 reg;
539 	int ret;
540 
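	/* pack the pull-down offset into bits 15:8 and the pull-up offset into bits 7:0 */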
541 	switch (ios->timing) {
542 	case MMC_TIMING_UHS_SDR104:
543 		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
544 		break;
545 	case MMC_TIMING_MMC_HS400:
546 		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
547 		break;
548 	default:
549 		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
550 			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
551 		else
552 			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
553 	}
554 
555 	/* Set initial offset before auto-calibration */
556 	tegra_sdhci_set_pad_autocal_offset(host, pdpu);
557 
558 	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
559 
560 	tegra_sdhci_configure_cal_pad(host, true);
561 
562 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
563 	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
564 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
565 
566 	usleep_range(1, 2);
567 	/* 10 ms timeout */
568 	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
569 				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
570 				 1000, 10000);
571 
572 	tegra_sdhci_configure_cal_pad(host, false);
573 
574 	tegra_sdhci_configure_card_clk(host, card_clk_enabled);
575 
576 	if (ret) {
577 		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
578 
579 		/* Disable automatic cal and use fixed Drive Strengths */
580 		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
581 		reg &= ~SDHCI_AUTO_CAL_ENABLE;
582 		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
583 
584 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
585 		if (ret < 0)
586 			dev_err(mmc_dev(host->mmc),
587 				"Setting drive strengths failed: %d\n", ret);
588 	}
589 }
590 
591 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
592 {
593 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
594 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
595 	struct sdhci_tegra_autocal_offsets *autocal =
596 			&tegra_host->autocal_offsets;
597 	int err;
598 
599 	err = device_property_read_u32(host->mmc->parent,
600 			"nvidia,pad-autocal-pull-up-offset-3v3",
601 			&autocal->pull_up_3v3);
602 	if (err)
603 		autocal->pull_up_3v3 = 0;
604 
605 	err = device_property_read_u32(host->mmc->parent,
606 			"nvidia,pad-autocal-pull-down-offset-3v3",
607 			&autocal->pull_down_3v3);
608 	if (err)
609 		autocal->pull_down_3v3 = 0;
610 
611 	err = device_property_read_u32(host->mmc->parent,
612 			"nvidia,pad-autocal-pull-up-offset-1v8",
613 			&autocal->pull_up_1v8);
614 	if (err)
615 		autocal->pull_up_1v8 = 0;
616 
617 	err = device_property_read_u32(host->mmc->parent,
618 			"nvidia,pad-autocal-pull-down-offset-1v8",
619 			&autocal->pull_down_1v8);
620 	if (err)
621 		autocal->pull_down_1v8 = 0;
622 
623 	err = device_property_read_u32(host->mmc->parent,
624 			"nvidia,pad-autocal-pull-up-offset-sdr104",
625 			&autocal->pull_up_sdr104);
626 	if (err)
627 		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
628 
629 	err = device_property_read_u32(host->mmc->parent,
630 			"nvidia,pad-autocal-pull-down-offset-sdr104",
631 			&autocal->pull_down_sdr104);
632 	if (err)
633 		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
634 
635 	err = device_property_read_u32(host->mmc->parent,
636 			"nvidia,pad-autocal-pull-up-offset-hs400",
637 			&autocal->pull_up_hs400);
638 	if (err)
639 		autocal->pull_up_hs400 = autocal->pull_up_1v8;
640 
641 	err = device_property_read_u32(host->mmc->parent,
642 			"nvidia,pad-autocal-pull-down-offset-hs400",
643 			&autocal->pull_down_hs400);
644 	if (err)
645 		autocal->pull_down_hs400 = autocal->pull_down_1v8;
646 
647 	/*
648 	 * Different fail-safe drive strength values based on the signaling
649 	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
650 	 * So, avoid reading the device tree properties below for SoCs that don't
651 	 * have NVQUIRK_NEEDS_PAD_CONTROL.
652 	 */
653 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
654 		return;
655 
656 	err = device_property_read_u32(host->mmc->parent,
657 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
658 			&autocal->pull_up_3v3_timeout);
659 	if (err) {
660 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
661 			(tegra_host->pinctrl_state_3v3_drv == NULL))
662 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
663 				mmc_hostname(host->mmc));
664 		autocal->pull_up_3v3_timeout = 0;
665 	}
666 
667 	err = device_property_read_u32(host->mmc->parent,
668 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
669 			&autocal->pull_down_3v3_timeout);
670 	if (err) {
671 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
672 			(tegra_host->pinctrl_state_3v3_drv == NULL))
673 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
674 				mmc_hostname(host->mmc));
675 		autocal->pull_down_3v3_timeout = 0;
676 	}
677 
678 	err = device_property_read_u32(host->mmc->parent,
679 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
680 			&autocal->pull_up_1v8_timeout);
681 	if (err) {
682 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
683 			(tegra_host->pinctrl_state_1v8_drv == NULL))
684 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
685 				mmc_hostname(host->mmc));
686 		autocal->pull_up_1v8_timeout = 0;
687 	}
688 
689 	err = device_property_read_u32(host->mmc->parent,
690 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
691 			&autocal->pull_down_1v8_timeout);
692 	if (err) {
693 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
694 			(tegra_host->pinctrl_state_1v8_drv == NULL))
695 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
696 				mmc_hostname(host->mmc));
697 		autocal->pull_down_1v8_timeout = 0;
698 	}
699 }
700 
701 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
702 {
703 	struct sdhci_host *host = mmc_priv(mmc);
704 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
705 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
706 	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
707 
708 	/* 100 ms calibration interval is specified in the TRM */
709 	if (ktime_to_ms(since_calib) > 100) {
710 		tegra_sdhci_pad_autocalib(host);
711 		tegra_host->last_calib = ktime_get();
712 	}
713 
714 	sdhci_request(mmc, mrq);
715 }
716 
717 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
718 {
719 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
720 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
721 	int err;
722 
723 	err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
724 				       &tegra_host->default_tap);
725 	if (err)
726 		tegra_host->default_tap = 0;
727 
728 	err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
729 				       &tegra_host->default_trim);
730 	if (err)
731 		tegra_host->default_trim = 0;
732 
733 	err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
734 				       &tegra_host->dqs_trim);
735 	if (err)
736 		tegra_host->dqs_trim = 0x11;
737 }
738 
739 static void tegra_sdhci_parse_dt(struct sdhci_host *host)
740 {
741 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
742 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
743 
744 	if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
745 		tegra_host->enable_hwcq = true;
746 	else
747 		tegra_host->enable_hwcq = false;
748 
749 	tegra_sdhci_parse_pad_autocal_dt(host);
750 	tegra_sdhci_parse_tap_and_trim(host);
751 }
752 
753 static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
754 {
755 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
756 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
757 	unsigned long host_clk;
758 
759 	if (!clock)
760 		return sdhci_set_clock(host, clock);
761 
762 	/*
763 	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
764 	 * divider to be configured to divided the host clock by two. The SDHCI
765 	 * clock divider is calculated as part of sdhci_set_clock() by
766 	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
767 	 * the requested clock rate.
768 	 *
769 	 * By setting the host->max_clk to clock * 2 the divider calculation
770 	 * will always result in the correct value for DDR50/52 modes,
771 	 * regardless of clock rate rounding, which may happen if the value
772 	 * from clk_get_rate() is used.
773 	 */
774 	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
775 	clk_set_rate(pltfm_host->clk, host_clk);
776 	tegra_host->curr_clk_rate = host_clk;
777 	if (tegra_host->ddr_signaling)
778 		host->max_clk = host_clk;
779 	else
780 		host->max_clk = clk_get_rate(pltfm_host->clk);
781 
782 	sdhci_set_clock(host, clock);
783 
784 	if (tegra_host->pad_calib_required) {
785 		tegra_sdhci_pad_autocalib(host);
786 		tegra_host->pad_calib_required = false;
787 	}
788 }
789 
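/* Report the highest rate the module clock can be set to as the host maximum. */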
790 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
791 {
792 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
793 
794 	return clk_round_rate(pltfm_host->clk, UINT_MAX);
795 }
796 
797 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
798 {
799 	u32 val;
800 
801 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
802 	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
803 	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
804 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
805 }
806 
807 static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
808 {
809 	u32 reg;
810 	int err;
811 
812 	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
813 	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
814 	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
815 
816 	/* 1 ms sleep, 5 ms timeout */
817 	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
818 				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
819 				 1000, 5000);
820 	if (err)
821 		dev_err(mmc_dev(host->mmc),
822 			"HS400 delay line calibration timed out\n");
823 }
824 
825 static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
826 				       u8 thd_low, u8 fixed_tap)
827 {
828 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
829 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
830 	u32 val, tun_status;
831 	u8 word, bit, edge1, tap, window;
832 	bool tap_result;
833 	bool start_fail = false;
834 	bool start_pass = false;
835 	bool end_pass = false;
836 	bool first_fail = false;
837 	bool first_pass = false;
838 	u8 start_pass_tap = 0;
839 	u8 end_pass_tap = 0;
840 	u8 first_fail_tap = 0;
841 	u8 first_pass_tap = 0;
842 	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
843 
844 	/*
845 	 * Read the auto-tuned results and extract a valid passing window by
846 	 * filtering out unwanted bubble/partial/merged windows.
847 	 */
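	/*
	 * Each tuning word is a 32-tap pass/fail bitmap: select the word via
	 * TUN_WORD_SEL and read it back from TUN_STATUS0.
	 */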
848 	for (word = 0; word < total_tuning_words; word++) {
849 		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
850 		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
851 		val |= word;
852 		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
853 		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
854 		bit = 0;
855 		while (bit < TUNING_WORD_BIT_SIZE) {
856 			tap = word * TUNING_WORD_BIT_SIZE + bit;
857 			tap_result = tun_status & (1 << bit);
858 			if (!tap_result && !start_fail) {
859 				start_fail = true;
860 				if (!first_fail) {
861 					first_fail_tap = tap;
862 					first_fail = true;
863 				}
864 
865 			} else if (tap_result && start_fail && !start_pass) {
866 				start_pass_tap = tap;
867 				start_pass = true;
868 				if (!first_pass) {
869 					first_pass_tap = tap;
870 					first_pass = true;
871 				}
872 
873 			} else if (!tap_result && start_fail && start_pass &&
874 				   !end_pass) {
875 				end_pass_tap = tap - 1;
876 				end_pass = true;
877 			} else if (tap_result && start_pass && start_fail &&
878 				   end_pass) {
879 				window = end_pass_tap - start_pass_tap;
880 				/* discard merged window and bubble window */
881 				if (window >= thd_up || window < thd_low) {
882 					start_pass_tap = tap;
883 					end_pass = false;
884 				} else {
885 					/* set tap at middle of valid window */
886 					tap = start_pass_tap + window / 2;
887 					tegra_host->tuned_tap_delay = tap;
888 					return;
889 				}
890 			}
891 
892 			bit++;
893 		}
894 	}
895 
896 	if (!first_fail) {
897 		WARN(1, "no edge detected, continue with hw tuned delay.\n");
898 	} else if (first_pass) {
899 		/* set tap location at fixed tap relative to the first edge */
900 		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
901 		if (edge1 - 1 > fixed_tap)
902 			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
903 		else
904 			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
905 	}
906 }
907 
908 static void tegra_sdhci_post_tuning(struct sdhci_host *host)
909 {
910 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
911 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
912 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
913 	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
914 	u8 fixed_tap, start_tap, end_tap, window_width;
915 	u8 thdupper, thdlower;
916 	u8 num_iter;
917 	u32 clk_rate_mhz, period_ps, bestcase, worstcase;
918 
919 	/* retain the HW-tuned tap to use in case no correction is needed */
920 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
921 	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
922 				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
923 	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
924 		min_tap_dly = soc_data->min_tap_delay;
925 		max_tap_dly = soc_data->max_tap_delay;
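		/* derive the clock period in picoseconds from the current rate */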
926 		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
927 		period_ps = USEC_PER_SEC / clk_rate_mhz;
928 		bestcase = period_ps / min_tap_dly;
929 		worstcase = period_ps / max_tap_dly;
930 		/*
931 		 * Upper and lower bound thresholds are used to detect merged
932 		 * and bubble windows.
933 		 */
934 		thdupper = (2 * worstcase + bestcase) / 2;
935 		thdlower = worstcase / 4;
936 		/*
937 		 * A fixed tap is used when the HW tuning result contains a single
938 		 * edge; the tap is then set at a fixed delay from the first edge.
939 		 */
940 		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
941 		fixed_tap = avg_tap_dly / 2;
942 
943 		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
944 		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
945 		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
946 			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
947 		window_width = end_tap - start_tap;
948 		num_iter = host->tuning_loop_count;
949 		/*
950 		 * A partial window includes the edges of the tuning range.
951 		 * A merged window includes more taps, so its width is higher
952 		 * than the upper threshold.
953 		 */
954 		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
955 		    (end_tap == num_iter - 2) || window_width >= thdupper) {
956 			pr_debug("%s: Apply tuning correction\n",
957 				 mmc_hostname(host->mmc));
958 			tegra_sdhci_tap_correction(host, thdupper, thdlower,
959 						   fixed_tap);
960 		}
961 	}
962 
963 	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
964 }
965 
966 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
967 {
968 	struct sdhci_host *host = mmc_priv(mmc);
969 	int err;
970 
971 	err = sdhci_execute_tuning(mmc, opcode);
972 	if (!err && !host->tuning_err)
973 		tegra_sdhci_post_tuning(host);
974 
975 	return err;
976 }
977 
978 static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
979 					  unsigned timing)
980 {
981 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
982 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
983 	bool set_default_tap = false;
984 	bool set_dqs_trim = false;
985 	bool do_hs400_dll_cal = false;
986 	u8 iter = TRIES_256;
987 	u32 val;
988 
989 	tegra_host->ddr_signaling = false;
990 	switch (timing) {
991 	case MMC_TIMING_UHS_SDR50:
992 		break;
993 	case MMC_TIMING_UHS_SDR104:
994 	case MMC_TIMING_MMC_HS200:
995 		/* Don't set default tap on tunable modes. */
996 		iter = TRIES_128;
997 		break;
998 	case MMC_TIMING_MMC_HS400:
999 		set_dqs_trim = true;
1000 		do_hs400_dll_cal = true;
1001 		iter = TRIES_128;
1002 		break;
1003 	case MMC_TIMING_MMC_DDR52:
1004 	case MMC_TIMING_UHS_DDR50:
1005 		tegra_host->ddr_signaling = true;
1006 		set_default_tap = true;
1007 		break;
1008 	default:
1009 		set_default_tap = true;
1010 		break;
1011 	}
1012 
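	/* program the HW tuner: iteration count, start tap value 0, MUL_M = 1 */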
1013 	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1014 	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1015 		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1016 		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1017 	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1018 		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1019 		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1020 	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1021 	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1022 
1023 	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1024 
1025 	sdhci_set_uhs_signaling(host, timing);
1026 
1027 	tegra_sdhci_pad_autocalib(host);
1028 
1029 	if (tegra_host->tuned_tap_delay && !set_default_tap)
1030 		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1031 	else
1032 		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1033 
1034 	if (set_dqs_trim)
1035 		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1036 
1037 	if (do_hs400_dll_cal)
1038 		tegra_sdhci_hs400_dll_cal(host);
1039 }
1040 
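/*
 * Manual tuning used by SoCs providing platform_execute_tuning: scan tap
 * values for the passing window and set the tap three quarters of the way
 * into it, per the TRM.
 */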
1041 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1042 {
1043 	unsigned int min, max;
1044 
1045 	/*
1046 	 * Start the search for the minimum tap value at 10, as smaller values
1047 	 * may wrongly be reported as working but fail at higher speeds,
1048 	 * according to the TRM.
1049 	 */
1050 	min = 10;
1051 	while (min < 255) {
1052 		tegra_sdhci_set_tap(host, min);
1053 		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1054 			break;
1055 		min++;
1056 	}
1057 
1058 	/* Find the maximum tap value that still passes. */
1059 	max = min + 1;
1060 	while (max < 255) {
1061 		tegra_sdhci_set_tap(host, max);
1062 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1063 			max--;
1064 			break;
1065 		}
1066 		max++;
1067 	}
1068 
1069 	/* The TRM states the ideal tap value is at 75% of the passing range. */
1070 	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1071 
1072 	return mmc_send_tuning(host->mmc, opcode, NULL);
1073 }
1074 
1075 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1076 						   struct mmc_ios *ios)
1077 {
1078 	struct sdhci_host *host = mmc_priv(mmc);
1079 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1080 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1081 	int ret = 0;
1082 
1083 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1084 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1085 		if (ret < 0)
1086 			return ret;
1087 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1088 	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1089 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1090 		if (ret < 0)
1091 			return ret;
1092 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1093 	}
1094 
1095 	if (tegra_host->pad_calib_required)
1096 		tegra_sdhci_pad_autocalib(host);
1097 
1098 	return ret;
1099 }
1100 
1101 static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1102 					 struct sdhci_tegra *tegra_host)
1103 {
1104 	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1105 	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1106 		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1107 			PTR_ERR(tegra_host->pinctrl_sdmmc));
1108 		return -1;
1109 	}
1110 
1111 	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1112 				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1113 	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1114 		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1115 			tegra_host->pinctrl_state_1v8_drv = NULL;
1116 	}
1117 
1118 	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1119 				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1120 	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1121 		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1122 			tegra_host->pinctrl_state_3v3_drv = NULL;
1123 	}
1124 
1125 	tegra_host->pinctrl_state_3v3 =
1126 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1127 	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1128 		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1129 			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1130 		return -1;
1131 	}
1132 
1133 	tegra_host->pinctrl_state_1v8 =
1134 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1135 	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1136 		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1137 			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1138 		return -1;
1139 	}
1140 
1141 	tegra_host->pad_control_available = true;
1142 
1143 	return 0;
1144 }
1145 
1146 static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1147 {
1148 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1149 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1150 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1151 
1152 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1153 		tegra_host->pad_calib_required = true;
1154 }
1155 
1156 static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1157 {
1158 	struct mmc_host *mmc = cq_host->mmc;
1159 	u8 ctrl;
1160 	ktime_t timeout;
1161 	bool timed_out;
1162 
1163 	/*
1164 	 * During CQE resume/unhalt, the CQHCI driver unhalts CQE prior to the
1165 	 * cqhci_host_ops enable callback, in which the SDHCI DMA and BLOCK_SIZE
1166 	 * registers need to be re-configured.
1167 	 * Tegra CQHCI/SDHCI prevents write access to the block size register
1168 	 * while CQE is unhalted, so handle the CQE resume sequence here and
1169 	 * configure the SDHCI block registers before exiting the CQE halt state.
1170 	 */
1171 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1172 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1173 		sdhci_cqe_enable(mmc);
1174 		writel(val, cq_host->mmio + reg);
1175 		timeout = ktime_add_us(ktime_get(), 50);
1176 		while (1) {
1177 			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1178 			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1179 			if (!(ctrl & CQHCI_HALT) || timed_out)
1180 				break;
1181 		}
1182 		/*
1183 		 * CQE usually resumes very quickly, but in case the Tegra CQE
1184 		 * doesn't resume, retry the unhalt.
1185 		 */
1186 		if (timed_out)
1187 			writel(val, cq_host->mmio + reg);
1188 	} else {
1189 		writel(val, cq_host->mmio + reg);
1190 	}
1191 }
1192 
1193 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1194 					 struct mmc_request *mrq, u64 *data)
1195 {
1196 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1197 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1198 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1199 
1200 	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1201 	    mrq->cmd->flags & MMC_RSP_R1B)
1202 		*data |= CQHCI_CMD_TIMING(1);
1203 }
1204 
1205 static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1206 {
1207 	struct cqhci_host *cq_host = mmc->cqe_private;
1208 	u32 val;
1209 
1210 	/*
1211 	 * The Tegra CQHCI/SDMMC design prevents write access to the SDHCI block
1212 	 * size register while CQE is enabled and unhalted.
1213 	 * The CQHCI driver enables CQE prior to activation, so disable CQE before
1214 	 * programming the block size in the SDHCI controller, then re-enable it.
1215 	 */
1216 	if (!cq_host->activated) {
1217 		val = cqhci_readl(cq_host, CQHCI_CFG);
1218 		if (val & CQHCI_ENABLE)
1219 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1220 				     CQHCI_CFG);
1221 		sdhci_cqe_enable(mmc);
1222 		if (val & CQHCI_ENABLE)
1223 			cqhci_writel(cq_host, val, CQHCI_CFG);
1224 	}
1225 
1226 	/*
1227 	 * CMD CRC errors are sometimes seen with some eMMC devices when the
1228 	 * status command is sent during transfer of the last data block, which
1229 	 * is the default case as the send status command block counter (CBC) is 1.
1230 	 * The recommended fix is to set CBC to 0, allowing the send status
1231 	 * command only when the data lines are idle.
1232 	 */
1233 	val = cqhci_readl(cq_host, CQHCI_SSC1);
1234 	val &= ~CQHCI_SSC1_CBC_MASK;
1235 	cqhci_writel(cq_host, val, CQHCI_SSC1);
1236 }
1237 
1238 static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1239 {
1240 	sdhci_dumpregs(mmc_priv(mmc));
1241 }
1242 
1243 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1244 {
1245 	int cmd_error = 0;
1246 	int data_error = 0;
1247 
1248 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1249 		return intmask;
1250 
1251 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1252 
1253 	return 0;
1254 }
1255 
1256 static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1257 				    struct mmc_command *cmd)
1258 {
1259 	u32 val;
1260 
1261 	/*
1262 	 * The HW busy detection timeout is based on the programmed data timeout
1263 	 * counter; the maximum supported timeout is 11 s, which may not be
1264 	 * enough for long operations like cache flush, sleep-awake or erase.
1265 	 *
1266 	 * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows
1267 	 * the host controller to wait for the busy state for as long as the
1268 	 * card is busy, without a HW timeout.
1269 	 *
1270 	 * So, use infinite busy wait mode for operations that may take more
1271 	 * than the maximum HW busy timeout of 11 s, otherwise use finite busy
1272 	 * wait mode.
1273 	 */
1274 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1275 	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1276 		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1277 	else
1278 		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1279 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1280 
1281 	__sdhci_set_timeout(host, cmd);
1282 }
1283 
1284 static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1285 	.write_l    = tegra_cqhci_writel,
1286 	.enable	= sdhci_tegra_cqe_enable,
1287 	.disable = sdhci_cqe_disable,
1288 	.dumpregs = sdhci_tegra_dumpregs,
1289 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1290 };
1291 
1292 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1293 {
1294 	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1295 	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1296 	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1297 	struct device *dev = mmc_dev(host->mmc);
1298 
1299 	if (soc->dma_mask)
1300 		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1301 
1302 	return 0;
1303 }
1304 
1305 static const struct sdhci_ops tegra_sdhci_ops = {
1306 	.get_ro     = tegra_sdhci_get_ro,
1307 	.read_w     = tegra_sdhci_readw,
1308 	.write_l    = tegra_sdhci_writel,
1309 	.set_clock  = tegra_sdhci_set_clock,
1310 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1311 	.set_bus_width = sdhci_set_bus_width,
1312 	.reset      = tegra_sdhci_reset,
1313 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1314 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1315 	.voltage_switch = tegra_sdhci_voltage_switch,
1316 	.get_max_clock = tegra_sdhci_get_max_clock,
1317 };
1318 
1319 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1320 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1321 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1322 		  SDHCI_QUIRK_NO_HISPD_BIT |
1323 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1324 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1325 	.ops  = &tegra_sdhci_ops,
1326 };
1327 
1328 static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1329 	.pdata = &sdhci_tegra20_pdata,
1330 	.dma_mask = DMA_BIT_MASK(32),
1331 	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1332 		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1333 };
1334 
1335 static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1336 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1337 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1338 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1339 		  SDHCI_QUIRK_NO_HISPD_BIT |
1340 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1341 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1342 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1343 		   SDHCI_QUIRK2_BROKEN_HS200 |
1344 		   /*
1345 		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1346 		    * though no command operation was in progress."
1347 		    *
1348 		    * The exact reason is unknown, as the same hardware seems
1349 		    * to support Auto CMD23 on a downstream 3.1 kernel.
1350 		    */
1351 		   SDHCI_QUIRK2_ACMD23_BROKEN,
1352 	.ops  = &tegra_sdhci_ops,
1353 };
1354 
1355 static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1356 	.pdata = &sdhci_tegra30_pdata,
1357 	.dma_mask = DMA_BIT_MASK(32),
1358 	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1359 		    NVQUIRK_ENABLE_SDR50 |
1360 		    NVQUIRK_ENABLE_SDR104 |
1361 		    NVQUIRK_HAS_PADCALIB,
1362 };
1363 
1364 static const struct sdhci_ops tegra114_sdhci_ops = {
1365 	.get_ro     = tegra_sdhci_get_ro,
1366 	.read_w     = tegra_sdhci_readw,
1367 	.write_w    = tegra_sdhci_writew,
1368 	.write_l    = tegra_sdhci_writel,
1369 	.set_clock  = tegra_sdhci_set_clock,
1370 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1371 	.set_bus_width = sdhci_set_bus_width,
1372 	.reset      = tegra_sdhci_reset,
1373 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1374 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1375 	.voltage_switch = tegra_sdhci_voltage_switch,
1376 	.get_max_clock = tegra_sdhci_get_max_clock,
1377 };
1378 
1379 static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1380 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1381 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1382 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1383 		  SDHCI_QUIRK_NO_HISPD_BIT |
1384 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1385 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1386 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1387 	.ops  = &tegra114_sdhci_ops,
1388 };
1389 
1390 static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1391 	.pdata = &sdhci_tegra114_pdata,
1392 	.dma_mask = DMA_BIT_MASK(32),
1393 };
1394 
1395 static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1396 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1397 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1398 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1399 		  SDHCI_QUIRK_NO_HISPD_BIT |
1400 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1401 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1402 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1403 	.ops  = &tegra114_sdhci_ops,
1404 };
1405 
1406 static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1407 	.pdata = &sdhci_tegra124_pdata,
1408 	.dma_mask = DMA_BIT_MASK(34),
1409 };
1410 
1411 static const struct sdhci_ops tegra210_sdhci_ops = {
1412 	.get_ro     = tegra_sdhci_get_ro,
1413 	.read_w     = tegra_sdhci_readw,
1414 	.write_w    = tegra210_sdhci_writew,
1415 	.write_l    = tegra_sdhci_writel,
1416 	.set_clock  = tegra_sdhci_set_clock,
1417 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1418 	.set_bus_width = sdhci_set_bus_width,
1419 	.reset      = tegra_sdhci_reset,
1420 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1421 	.voltage_switch = tegra_sdhci_voltage_switch,
1422 	.get_max_clock = tegra_sdhci_get_max_clock,
1423 	.set_timeout = tegra_sdhci_set_timeout,
1424 };
1425 
1426 static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1427 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1428 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1429 		  SDHCI_QUIRK_NO_HISPD_BIT |
1430 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1431 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1432 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1433 	.ops  = &tegra210_sdhci_ops,
1434 };
1435 
1436 static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1437 	.pdata = &sdhci_tegra210_pdata,
1438 	.dma_mask = DMA_BIT_MASK(34),
1439 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1440 		    NVQUIRK_HAS_PADCALIB |
1441 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1442 		    NVQUIRK_ENABLE_SDR50 |
1443 		    NVQUIRK_ENABLE_SDR104 |
1444 		    NVQUIRK_HAS_TMCLK,
1445 	.min_tap_delay = 106,
1446 	.max_tap_delay = 185,
1447 };
1448 
1449 static const struct sdhci_ops tegra186_sdhci_ops = {
1450 	.get_ro     = tegra_sdhci_get_ro,
1451 	.read_w     = tegra_sdhci_readw,
1452 	.write_l    = tegra_sdhci_writel,
1453 	.set_clock  = tegra_sdhci_set_clock,
1454 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1455 	.set_bus_width = sdhci_set_bus_width,
1456 	.reset      = tegra_sdhci_reset,
1457 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1458 	.voltage_switch = tegra_sdhci_voltage_switch,
1459 	.get_max_clock = tegra_sdhci_get_max_clock,
1460 	.irq = sdhci_tegra_cqhci_irq,
1461 	.set_timeout = tegra_sdhci_set_timeout,
1462 };
1463 
1464 static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1465 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1466 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1467 		  SDHCI_QUIRK_NO_HISPD_BIT |
1468 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1469 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1470 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1471 	.ops  = &tegra186_sdhci_ops,
1472 };
1473 
1474 static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1475 	.pdata = &sdhci_tegra186_pdata,
1476 	.dma_mask = DMA_BIT_MASK(40),
1477 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1478 		    NVQUIRK_HAS_PADCALIB |
1479 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1480 		    NVQUIRK_ENABLE_SDR50 |
1481 		    NVQUIRK_ENABLE_SDR104 |
1482 		    NVQUIRK_HAS_TMCLK |
1483 		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1484 	.min_tap_delay = 84,
1485 	.max_tap_delay = 136,
1486 };
1487 
1488 static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1489 	.pdata = &sdhci_tegra186_pdata,
1490 	.dma_mask = DMA_BIT_MASK(39),
1491 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1492 		    NVQUIRK_HAS_PADCALIB |
1493 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1494 		    NVQUIRK_ENABLE_SDR50 |
1495 		    NVQUIRK_ENABLE_SDR104 |
1496 		    NVQUIRK_HAS_TMCLK,
1497 	.min_tap_delay = 96,
1498 	.max_tap_delay = 139,
1499 };
1500 
1501 static const struct of_device_id sdhci_tegra_dt_match[] = {
1502 	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1503 	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1504 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1505 	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1506 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1507 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1508 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1509 	{}
1510 };
1511 MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1512 
1513 static int sdhci_tegra_add_host(struct sdhci_host *host)
1514 {
1515 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1516 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1517 	struct cqhci_host *cq_host;
1518 	bool dma64;
1519 	int ret;
1520 
1521 	if (!tegra_host->enable_hwcq)
1522 		return sdhci_add_host(host);
1523 
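	/* with HW command queueing, the host is operated in SDHCI version 4 mode */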
1524 	sdhci_enable_v4_mode(host);
1525 
1526 	ret = sdhci_setup_host(host);
1527 	if (ret)
1528 		return ret;
1529 
1530 	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1531 
1532 	cq_host = devm_kzalloc(host->mmc->parent,
1533 				sizeof(*cq_host), GFP_KERNEL);
1534 	if (!cq_host) {
1535 		ret = -ENOMEM;
1536 		goto cleanup;
1537 	}
1538 
1539 	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1540 	cq_host->ops = &sdhci_tegra_cqhci_ops;
1541 
1542 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1543 	if (dma64)
1544 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1545 
1546 	ret = cqhci_init(cq_host, host->mmc, dma64);
1547 	if (ret)
1548 		goto cleanup;
1549 
1550 	ret = __sdhci_add_host(host);
1551 	if (ret)
1552 		goto cleanup;
1553 
1554 	return 0;
1555 
1556 cleanup:
1557 	sdhci_cleanup_host(host);
1558 	return ret;
1559 }
1560 
1561 static int sdhci_tegra_probe(struct platform_device *pdev)
1562 {
1563 	const struct of_device_id *match;
1564 	const struct sdhci_tegra_soc_data *soc_data;
1565 	struct sdhci_host *host;
1566 	struct sdhci_pltfm_host *pltfm_host;
1567 	struct sdhci_tegra *tegra_host;
1568 	struct clk *clk;
1569 	int rc;
1570 
1571 	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1572 	if (!match)
1573 		return -EINVAL;
1574 	soc_data = match->data;
1575 
1576 	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1577 	if (IS_ERR(host))
1578 		return PTR_ERR(host);
1579 	pltfm_host = sdhci_priv(host);
1580 
1581 	tegra_host = sdhci_pltfm_priv(pltfm_host);
1582 	tegra_host->ddr_signaling = false;
1583 	tegra_host->pad_calib_required = false;
1584 	tegra_host->pad_control_available = false;
1585 	tegra_host->soc_data = soc_data;
1586 
1587 	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1588 		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1589 		if (rc == 0)
1590 			host->mmc_host_ops.start_signal_voltage_switch =
1591 				sdhci_tegra_start_signal_voltage_switch;
1592 	}
1593 
1594 	/* Hook to periodically rerun pad calibration */
1595 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1596 		host->mmc_host_ops.request = tegra_sdhci_request;
1597 
1598 	host->mmc_host_ops.hs400_enhanced_strobe =
1599 			tegra_sdhci_hs400_enhanced_strobe;
1600 
1601 	if (!host->ops->platform_execute_tuning)
1602 		host->mmc_host_ops.execute_tuning =
1603 				tegra_sdhci_execute_hw_tuning;
1604 
1605 	rc = mmc_of_parse(host->mmc);
1606 	if (rc)
1607 		goto err_parse_dt;
1608 
1609 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1610 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1611 
1612 	/* HW busy detection is supported, but R1B responses are required. */
1613 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1614 
1615 	tegra_sdhci_parse_dt(host);
1616 
1617 	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1618 							 GPIOD_OUT_HIGH);
1619 	if (IS_ERR(tegra_host->power_gpio)) {
1620 		rc = PTR_ERR(tegra_host->power_gpio);
1621 		goto err_power_req;
1622 	}
1623 
1624 	/*
1625 	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used as the host
1626 	 * timeout clock, and SW can choose TMCLK or SDCLK for the hardware
1627 	 * data timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit of
1628 	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
1629 	 *
1630 	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, and SDMMC uses the
1631 	 * 12 MHz TMCLK, which is advertised in the host capability register.
1632 	 * With a 12 MHz TMCLK, the maximum achievable data timeout period is
1633 	 * 11 s, which is better than using SDCLK for the data timeout.
1634 	 *
1635 	 * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs
1636 	 * supporting a separate TMCLK.
1637 	 */
1638 
1639 	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1640 		clk = devm_clk_get(&pdev->dev, "tmclk");
1641 		if (IS_ERR(clk)) {
1642 			rc = PTR_ERR(clk);
1643 			if (rc == -EPROBE_DEFER)
1644 				goto err_power_req;
1645 
1646 			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
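			/* a NULL clk is a no-op for the clk API, so continue without TMCLK */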
1647 			clk = NULL;
1648 		}
1649 
1650 		clk_set_rate(clk, 12000000);
1651 		rc = clk_prepare_enable(clk);
1652 		if (rc) {
1653 			dev_err(&pdev->dev,
1654 				"failed to enable tmclk: %d\n", rc);
1655 			goto err_power_req;
1656 		}
1657 
1658 		tegra_host->tmclk = clk;
1659 	}
1660 
1661 	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1662 	if (IS_ERR(clk)) {
1663 		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1664 				   "failed to get clock\n");
1665 		goto err_clk_get;
1666 	}
1667 	clk_prepare_enable(clk);
1668 	pltfm_host->clk = clk;
1669 
1670 	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1671 							   "sdhci");
1672 	if (IS_ERR(tegra_host->rst)) {
1673 		rc = PTR_ERR(tegra_host->rst);
1674 		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1675 		goto err_rst_get;
1676 	}
1677 
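	/* pulse the module reset so the controller starts from a clean state */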
1678 	rc = reset_control_assert(tegra_host->rst);
1679 	if (rc)
1680 		goto err_rst_get;
1681 
1682 	usleep_range(2000, 4000);
1683 
1684 	rc = reset_control_deassert(tegra_host->rst);
1685 	if (rc)
1686 		goto err_rst_get;
1687 
1688 	usleep_range(2000, 4000);
1689 
1690 	rc = sdhci_tegra_add_host(host);
1691 	if (rc)
1692 		goto err_add_host;
1693 
1694 	return 0;
1695 
1696 err_add_host:
1697 	reset_control_assert(tegra_host->rst);
1698 err_rst_get:
1699 	clk_disable_unprepare(pltfm_host->clk);
1700 err_clk_get:
1701 	clk_disable_unprepare(tegra_host->tmclk);
1702 err_power_req:
1703 err_parse_dt:
1704 	sdhci_pltfm_free(pdev);
1705 	return rc;
1706 }
1707 
1708 static int sdhci_tegra_remove(struct platform_device *pdev)
1709 {
1710 	struct sdhci_host *host = platform_get_drvdata(pdev);
1711 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1712 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1713 
1714 	sdhci_remove_host(host, 0);
1715 
1716 	reset_control_assert(tegra_host->rst);
1717 	usleep_range(2000, 4000);
1718 	clk_disable_unprepare(pltfm_host->clk);
1719 	clk_disable_unprepare(tegra_host->tmclk);
1720 
1721 	sdhci_pltfm_free(pdev);
1722 
1723 	return 0;
1724 }
1725 
1726 #ifdef CONFIG_PM_SLEEP
1727 static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
1728 {
1729 	struct sdhci_host *host = dev_get_drvdata(dev);
1730 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1731 	int ret;
1732 
1733 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1734 		ret = cqhci_suspend(host->mmc);
1735 		if (ret)
1736 			return ret;
1737 	}
1738 
1739 	ret = sdhci_suspend_host(host);
1740 	if (ret) {
1741 		cqhci_resume(host->mmc);
1742 		return ret;
1743 	}
1744 
1745 	clk_disable_unprepare(pltfm_host->clk);
1746 	return 0;
1747 }
1748 
1749 static int __maybe_unused sdhci_tegra_resume(struct device *dev)
1750 {
1751 	struct sdhci_host *host = dev_get_drvdata(dev);
1752 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1753 	int ret;
1754 
1755 	ret = clk_prepare_enable(pltfm_host->clk);
1756 	if (ret)
1757 		return ret;
1758 
1759 	ret = sdhci_resume_host(host);
1760 	if (ret)
1761 		goto disable_clk;
1762 
1763 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1764 		ret = cqhci_resume(host->mmc);
1765 		if (ret)
1766 			goto suspend_host;
1767 	}
1768 
1769 	return 0;
1770 
1771 suspend_host:
1772 	sdhci_suspend_host(host);
1773 disable_clk:
1774 	clk_disable_unprepare(pltfm_host->clk);
1775 	return ret;
1776 }
1777 #endif
1778 
1779 static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
1780 			 sdhci_tegra_resume);
1781 
1782 static struct platform_driver sdhci_tegra_driver = {
1783 	.driver		= {
1784 		.name	= "sdhci-tegra",
1785 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1786 		.of_match_table = sdhci_tegra_dt_match,
1787 		.pm	= &sdhci_tegra_dev_pm_ops,
1788 	},
1789 	.probe		= sdhci_tegra_probe,
1790 	.remove		= sdhci_tegra_remove,
1791 };
1792 
1793 module_platform_driver(sdhci_tegra_driver);
1794 
1795 MODULE_DESCRIPTION("SDHCI driver for Tegra");
1796 MODULE_AUTHOR("Google, Inc.");
1797 MODULE_LICENSE("GPL v2");
1798