xref: /openbmc/linux/drivers/mmc/host/sdhci-tegra.c (revision 4fc7261d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2010 Google, Inc.
4  */
5 
6 #include <linux/delay.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/err.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_device.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/regulator/consumer.h>
19 #include <linux/reset.h>
20 #include <linux/mmc/card.h>
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/mmc.h>
23 #include <linux/mmc/slot-gpio.h>
24 #include <linux/gpio/consumer.h>
25 #include <linux/ktime.h>
26 
27 #include "sdhci-pltfm.h"
28 #include "cqhci.h"
29 
30 /* Tegra SDHOST controller vendor register definitions */
31 #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
32 #define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
33 #define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
34 #define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
35 #define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
36 #define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
37 #define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
38 #define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)
39 
40 #define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
41 #define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)
42 
43 #define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
44 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
45 #define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8
46 
47 #define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
48 #define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
49 #define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
50 #define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
51 #define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
52 #define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200
53 
54 #define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
55 #define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)
56 
57 #define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
58 #define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)
59 
60 #define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
61 #define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
62 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
63 #define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
64 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
65 #define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
66 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
67 #define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
68 #define TRIES_128					2
69 #define TRIES_256					4
70 #define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7
71 
72 #define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
73 #define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
74 #define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
75 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
76 #define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
77 #define TUNING_WORD_BIT_SIZE				32
78 
79 #define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
80 #define SDHCI_AUTO_CAL_START				BIT(31)
81 #define SDHCI_AUTO_CAL_ENABLE				BIT(29)
82 #define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff
83 
84 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
85 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
86 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
87 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
88 #define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000
89 
90 #define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
91 #define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)
92 
93 #define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
94 #define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
95 #define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
96 #define NVQUIRK_ENABLE_SDR50				BIT(3)
97 #define NVQUIRK_ENABLE_SDR104				BIT(4)
98 #define NVQUIRK_ENABLE_DDR50				BIT(5)
99 /*
100  * HAS_PADCALIB NVQUIRK is for SoC's supporting auto calibration of pads
101  * drive strength.
102  */
103 #define NVQUIRK_HAS_PADCALIB				BIT(6)
104 /*
105  * NEEDS_PAD_CONTROL NVQUIRK is for SoC's having separate 3V3 and 1V8 pads.
106  * 3V3/1V8 pad selection happens through pinctrl state selection depending
107  * on the signaling mode.
108  */
109 #define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
110 #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
111 #define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)
112 
113 /*
114  * NVQUIRK_HAS_TMCLK is for SoC's having separate timeout clock for Tegra
115  * SDMMC hardware data timeout.
116  */
117 #define NVQUIRK_HAS_TMCLK				BIT(10)
118 
119 #define NVQUIRK_HAS_ANDROID_GPT_SECTOR			BIT(11)
120 
121 /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
122 #define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000
123 
124 #define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
125 					 SDHCI_TRNS_BLK_CNT_EN | \
126 					 SDHCI_TRNS_DMA)
127 
/*
 * Per-SoC configuration, selected at probe time.
 */
struct sdhci_tegra_soc_data {
	const struct sdhci_pltfm_data *pdata;	/* generic SDHCI platform data */
	u64 dma_mask;				/* DMA addressing capability of this SoC */
	u32 nvquirks;				/* bitmask of NVQUIRK_* flags */
	u8 min_tap_delay;	/* min tap delay, used to derive tuning window thresholds */
	u8 max_tap_delay;	/* max tap delay, used to derive tuning window thresholds */
};
135 
/* Magic pull up and pull down pad calibration offsets */
struct sdhci_tegra_autocal_offsets {
	/* offsets applied before auto-calibration, per signaling voltage */
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	/* fail-safe drive strengths used when auto-calibration times out */
	u32 pull_up_3v3_timeout;
	u32 pull_down_3v3_timeout;
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	u32 pull_up_1v8_timeout;
	u32 pull_down_1v8_timeout;
	/* timing-specific offsets; default to the 1v8 values when absent in DT */
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};
151 
/* Per-instance driver state, stored in the sdhci_pltfm private area. */
struct sdhci_tegra {
	const struct sdhci_tegra_soc_data *soc_data;
	struct gpio_desc *power_gpio;
	struct clk *tmclk;		/* separate timeout clock (NVQUIRK_HAS_TMCLK) */
	bool ddr_signaling;		/* true in DDR modes; doubles host_clk in set_clock */
	bool pad_calib_required;	/* set after reset; triggers autocal on next clock change */
	bool pad_control_available;	/* valid 3V3/1V8 pad pinctrl states found */

	struct reset_control *rst;
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;	/* fixed drive strengths, 3V3 pads */
	struct pinctrl_state *pinctrl_state_1v8_drv;	/* fixed drive strengths, 1V8 pads */

	struct sdhci_tegra_autocal_offsets autocal_offsets;
	ktime_t last_calib;	/* timestamp of last pad auto-calibration */

	u32 default_tap;	/* from "nvidia,default-tap" DT property */
	u32 default_trim;	/* from "nvidia,default-trim" DT property */
	u32 dqs_trim;		/* from "nvidia,dqs-trim" DT property (default 0x11) */
	bool enable_hwcq;	/* hardware command queueing ("supports-cqe") */
	unsigned long curr_clk_rate;	/* rate last requested via clk_set_rate() */
	u8 tuned_tap_delay;	/* tap delay selected by tuning correction */
};
177 
178 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
179 {
180 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
181 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
182 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
183 
184 	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
185 			(reg == SDHCI_HOST_VERSION))) {
186 		/* Erratum: Version register is invalid in HW. */
187 		return SDHCI_SPEC_200;
188 	}
189 
190 	return readw(host->ioaddr + reg);
191 }
192 
/*
 * 16-bit register write. TRANSFER_MODE writes are buffered in
 * xfer_mode_shadow and only issued, combined with the subsequent
 * COMMAND write, as a single 32-bit write; the two registers share
 * one 32-bit word (COMMAND in the upper half).
 */
static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	switch (reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below.
		 */
		pltfm_host->xfer_mode_shadow = val;
		return;
	case SDHCI_COMMAND:
		writel((val << 16) | pltfm_host->xfer_mode_shadow,
			host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	}

	writew(val, host->ioaddr + reg);
}
213 
/*
 * 32-bit register write with two Tegra-specific adjustments: timeout
 * and CRC error signalling is masked out of SIGNAL_ENABLE (spurious on
 * this hardware), and on SoCs with NVQUIRK_ENABLE_BLOCK_GAP_DET the
 * block-gap interrupt bit is kept in sync with card-interrupt enabling.
 */
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	/* Seems like we're getting spurious timeout and crc errors, so
	 * disable signalling of them. In case of real errors software
	 * timers should take care of eventually detecting them.
	 */
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
			(reg == SDHCI_INT_ENABLE))) {
		/* Erratum: Must enable block gap interrupt detection */
		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
		/* bit 3 mirrors the card-interrupt enable state */
		if (val & SDHCI_INT_CARD_INT)
			gap_ctrl |= 0x8;
		else
			gap_ctrl &= ~0x8;
		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}
240 
241 static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
242 {
243 	bool status;
244 	u32 reg;
245 
246 	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
247 	status = !!(reg & SDHCI_CLOCK_CARD_EN);
248 
249 	if (status == enable)
250 		return status;
251 
252 	if (enable)
253 		reg |= SDHCI_CLOCK_CARD_EN;
254 	else
255 		reg &= ~SDHCI_CLOCK_CARD_EN;
256 
257 	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
258 
259 	return status;
260 }
261 
262 static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
263 {
264 	bool is_tuning_cmd = 0;
265 	bool clk_enabled;
266 	u8 cmd;
267 
268 	if (reg == SDHCI_COMMAND) {
269 		cmd = SDHCI_GET_CMD(val);
270 		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
271 				cmd == MMC_SEND_TUNING_BLOCK_HS200;
272 	}
273 
274 	if (is_tuning_cmd)
275 		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);
276 
277 	writew(val, host->ioaddr + reg);
278 
279 	if (is_tuning_cmd) {
280 		udelay(1);
281 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
282 		tegra_sdhci_configure_card_clk(host, clk_enabled);
283 	}
284 }
285 
/* ->get_ro callback: read-only state comes from the optional WP GPIO only. */
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
	/*
	 * Write-enable shall be assumed if GPIO is missing in a board's
	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
	 * Tegra.
	 */
	return mmc_gpio_get_ro(host->mmc);
}
295 
296 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
297 {
298 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
299 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
300 	int has_1v8, has_3v3;
301 
302 	/*
303 	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
304 	 * voltage configuration in order to perform voltage switching. This
305 	 * means that valid pinctrl info is required on SDHCI instances capable
306 	 * of performing voltage switching. Whether or not an SDHCI instance is
307 	 * capable of voltage switching is determined based on the regulator.
308 	 */
309 
310 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
311 		return true;
312 
313 	if (IS_ERR(host->mmc->supply.vqmmc))
314 		return false;
315 
316 	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
317 						 1700000, 1950000);
318 
319 	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
320 						 2700000, 3600000);
321 
322 	if (has_1v8 == 1 && has_3v3 == 1)
323 		return tegra_host->pad_control_available;
324 
325 	/* Fixed voltage, no pad control required. */
326 	return true;
327 }
328 
/*
 * Program @tap into the TAP field of the vendor clock-control register.
 * On SoCs with NVQUIRK_DIS_CARD_CLK_CONFIG_TAP the card clock is gated
 * for the duration of the update and CMD/DATA are reset before it is
 * re-enabled.
 */
static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	bool card_clk_enabled = false;
	u32 reg;

	/*
	 * Touching the tap values is a bit tricky on some SoC generations.
	 * The quirk enables a workaround for a glitch that sometimes occurs if
	 * the tap values are changed.
	 */

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	/* Restore the card clock only if it was running before the update. */
	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
	    card_clk_enabled) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
	}
}
358 
/*
 * ->reset callback. After the standard SDHCI reset, a full reset
 * (SDHCI_RESET_ALL) additionally restores the Tegra vendor defaults:
 * default tap and trim values, UHS mode advertisement according to the
 * SoC quirks, pad VREF selection, and flags that pad auto-calibration
 * must be re-run.
 */
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 misc_ctrl, clk_ctrl, pad_ctrl;

	sdhci_reset(host, mask);

	/* Vendor state only needs reprogramming after a full reset. */
	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra_host->default_tap);

	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	/* Start from a clean slate, then re-enable per quirks below. */
	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
		       SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	/* UHS modes are only advertised when voltage switching can work. */
	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: Enable SDHCI spec v3.00 support */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
		/* Advertise UHS modes as supported by host */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		/* Re-run auto-calibration on the next clock configuration. */
		tegra_host->pad_calib_required = true;
	}

	tegra_host->ddr_signaling = false;
}
415 
416 static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
417 {
418 	u32 val;
419 
420 	/*
421 	 * Enable or disable the additional I/O pad used by the drive strength
422 	 * calibration process.
423 	 */
424 	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
425 
426 	if (enable)
427 		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
428 	else
429 		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
430 
431 	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
432 
433 	if (enable)
434 		usleep_range(1, 2);
435 }
436 
437 static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
438 					       u16 pdpu)
439 {
440 	u32 reg;
441 
442 	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
443 	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
444 	reg |= pdpu;
445 	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
446 }
447 
/*
 * Configure the SDMMC pads for @voltage.
 *
 * @state_drvupdn false: apply fixed (fail-safe) drive strengths for the
 * current voltage, preferring the *_drv pinctrl state and falling back
 * to programming the *_timeout offsets into SDMEM_COMP_PADCTRL.
 * @state_drvupdn true: select the 1.8V or 3.3V pad voltage pinctrl
 * state (a no-op when pad control is unavailable).
 *
 * Returns 0 on success or the negative error from pinctrl_select_state().
 */
static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
						&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/* PADS Drive Strength */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
							pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			/* drvup lives at bits 26:20, drvdn at bits 18:12 */
			reg = sdhci_readl(host,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}

	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}
517 
/*
 * Run automatic pad drive-strength calibration: program the offsets
 * matching the current timing/voltage, gate the card clock, power the
 * calibration pad, kick off auto-cal and poll for completion (10 ms).
 * On timeout, auto-cal is disabled and fixed fail-safe drive strengths
 * are applied instead.
 */
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	/* local by-value copy of the DT-provided offsets */
	struct sdhci_tegra_autocal_offsets offsets =
			tegra_host->autocal_offsets;
	struct mmc_ios *ios = &host->mmc->ios;
	bool card_clk_enabled;
	u16 pdpu;
	u32 reg;
	int ret;

	/* Pack pull-down (high byte) and pull-up (low byte) per timing. */
	switch (ios->timing) {
	case MMC_TIMING_UHS_SDR104:
		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
		break;
	case MMC_TIMING_MMC_HS400:
		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
		break;
	default:
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
		else
			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
	}

	/* Set initial offset before auto-calibration */
	tegra_sdhci_set_pad_autocal_offset(host, pdpu);

	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	tegra_sdhci_configure_cal_pad(host, true);

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

	usleep_range(1, 2);
	/* 10 ms timeout */
	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
				 1000, 10000);

	tegra_sdhci_configure_cal_pad(host, false);

	tegra_sdhci_configure_card_clk(host, card_clk_enabled);

	if (ret) {
		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");

		/* Disable automatic cal and use fixed Drive Strengths */
		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
		reg &= ~SDHCI_AUTO_CAL_ENABLE;
		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Setting drive strengths failed: %d\n", ret);
	}
}
579 
580 static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
581 {
582 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
583 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
584 	struct sdhci_tegra_autocal_offsets *autocal =
585 			&tegra_host->autocal_offsets;
586 	int err;
587 
588 	err = device_property_read_u32(mmc_dev(host->mmc),
589 			"nvidia,pad-autocal-pull-up-offset-3v3",
590 			&autocal->pull_up_3v3);
591 	if (err)
592 		autocal->pull_up_3v3 = 0;
593 
594 	err = device_property_read_u32(mmc_dev(host->mmc),
595 			"nvidia,pad-autocal-pull-down-offset-3v3",
596 			&autocal->pull_down_3v3);
597 	if (err)
598 		autocal->pull_down_3v3 = 0;
599 
600 	err = device_property_read_u32(mmc_dev(host->mmc),
601 			"nvidia,pad-autocal-pull-up-offset-1v8",
602 			&autocal->pull_up_1v8);
603 	if (err)
604 		autocal->pull_up_1v8 = 0;
605 
606 	err = device_property_read_u32(mmc_dev(host->mmc),
607 			"nvidia,pad-autocal-pull-down-offset-1v8",
608 			&autocal->pull_down_1v8);
609 	if (err)
610 		autocal->pull_down_1v8 = 0;
611 
612 	err = device_property_read_u32(mmc_dev(host->mmc),
613 			"nvidia,pad-autocal-pull-up-offset-sdr104",
614 			&autocal->pull_up_sdr104);
615 	if (err)
616 		autocal->pull_up_sdr104 = autocal->pull_up_1v8;
617 
618 	err = device_property_read_u32(mmc_dev(host->mmc),
619 			"nvidia,pad-autocal-pull-down-offset-sdr104",
620 			&autocal->pull_down_sdr104);
621 	if (err)
622 		autocal->pull_down_sdr104 = autocal->pull_down_1v8;
623 
624 	err = device_property_read_u32(mmc_dev(host->mmc),
625 			"nvidia,pad-autocal-pull-up-offset-hs400",
626 			&autocal->pull_up_hs400);
627 	if (err)
628 		autocal->pull_up_hs400 = autocal->pull_up_1v8;
629 
630 	err = device_property_read_u32(mmc_dev(host->mmc),
631 			"nvidia,pad-autocal-pull-down-offset-hs400",
632 			&autocal->pull_down_hs400);
633 	if (err)
634 		autocal->pull_down_hs400 = autocal->pull_down_1v8;
635 
636 	/*
637 	 * Different fail-safe drive strength values based on the signaling
638 	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
639 	 * So, avoid reading below device tree properties for SoCs that don't
640 	 * have NVQUIRK_NEEDS_PAD_CONTROL.
641 	 */
642 	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
643 		return;
644 
645 	err = device_property_read_u32(mmc_dev(host->mmc),
646 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
647 			&autocal->pull_up_3v3_timeout);
648 	if (err) {
649 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
650 			(tegra_host->pinctrl_state_3v3_drv == NULL))
651 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
652 				mmc_hostname(host->mmc));
653 		autocal->pull_up_3v3_timeout = 0;
654 	}
655 
656 	err = device_property_read_u32(mmc_dev(host->mmc),
657 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
658 			&autocal->pull_down_3v3_timeout);
659 	if (err) {
660 		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
661 			(tegra_host->pinctrl_state_3v3_drv == NULL))
662 			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
663 				mmc_hostname(host->mmc));
664 		autocal->pull_down_3v3_timeout = 0;
665 	}
666 
667 	err = device_property_read_u32(mmc_dev(host->mmc),
668 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
669 			&autocal->pull_up_1v8_timeout);
670 	if (err) {
671 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
672 			(tegra_host->pinctrl_state_1v8_drv == NULL))
673 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
674 				mmc_hostname(host->mmc));
675 		autocal->pull_up_1v8_timeout = 0;
676 	}
677 
678 	err = device_property_read_u32(mmc_dev(host->mmc),
679 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
680 			&autocal->pull_down_1v8_timeout);
681 	if (err) {
682 		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
683 			(tegra_host->pinctrl_state_1v8_drv == NULL))
684 			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
685 				mmc_hostname(host->mmc));
686 		autocal->pull_down_1v8_timeout = 0;
687 	}
688 }
689 
690 static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
691 {
692 	struct sdhci_host *host = mmc_priv(mmc);
693 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
694 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
695 	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
696 
697 	/* 100 ms calibration interval is specified in the TRM */
698 	if (ktime_to_ms(since_calib) > 100) {
699 		tegra_sdhci_pad_autocalib(host);
700 		tegra_host->last_calib = ktime_get();
701 	}
702 
703 	sdhci_request(mmc, mrq);
704 }
705 
706 static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
707 {
708 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
709 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
710 	int err;
711 
712 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-tap",
713 				       &tegra_host->default_tap);
714 	if (err)
715 		tegra_host->default_tap = 0;
716 
717 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,default-trim",
718 				       &tegra_host->default_trim);
719 	if (err)
720 		tegra_host->default_trim = 0;
721 
722 	err = device_property_read_u32(mmc_dev(host->mmc), "nvidia,dqs-trim",
723 				       &tegra_host->dqs_trim);
724 	if (err)
725 		tegra_host->dqs_trim = 0x11;
726 }
727 
728 static void tegra_sdhci_parse_dt(struct sdhci_host *host)
729 {
730 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
731 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
732 
733 	if (device_property_read_bool(mmc_dev(host->mmc), "supports-cqe"))
734 		tegra_host->enable_hwcq = true;
735 	else
736 		tegra_host->enable_hwcq = false;
737 
738 	tegra_sdhci_parse_pad_autocal_dt(host);
739 	tegra_sdhci_parse_tap_and_trim(host);
740 }
741 
/*
 * ->set_clock callback: set the module clock rate (doubled in DDR
 * modes, see comment below), refresh host->max_clk so the SDHCI
 * divider calculation stays correct, then run the generic SDHCI clock
 * setup and any pending pad auto-calibration.
 */
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long host_clk;

	/* Clock gating only: no rate change or recalibration needed. */
	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divided the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
	clk_set_rate(pltfm_host->clk, host_clk);
	tegra_host->curr_clk_rate = host_clk;
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	/* A preceding reset flagged that pads must be recalibrated. */
	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}
778 
/*
 * ->hs400_enhanced_strobe callback: toggle the enhanced strobe bit in
 * the vendor system software control register, raising the interface
 * clock when the mode is enabled (see comment below).
 */
static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
					      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);

	if (ios->enhanced_strobe) {
		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
		/*
		 * When CMD13 is sent from mmc_select_hs400es() after
		 * switching to HS400ES mode, the bus is operating at
		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
		 * controller CAR clock and the interface clock are rate matched.
		 */
		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
	} else {
		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
	}

	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
}
804 
805 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
806 {
807 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
808 
809 	return clk_round_rate(pltfm_host->clk, UINT_MAX);
810 }
811 
812 static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
813 {
814 	u32 val;
815 
816 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
817 	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
818 	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
819 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
820 }
821 
/*
 * Trigger HS400 delay-line calibration and poll the status register
 * until the calibration completes, logging an error on timeout.
 */
static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
{
	u32 reg;
	int err;

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);

	/* 1 ms sleep, 5 ms timeout */
	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
				 1000, 5000);
	if (err)
		dev_err(mmc_dev(host->mmc),
			"HS400 delay line calibration timed out\n");
}
839 
/*
 * Post-process the hardware tuning result: walk the per-tap pass/fail
 * bitmap (one bit per tap, TUNING_WORD_BIT_SIZE taps per status word)
 * looking for a fail->pass->fail sequence that delimits a clean passing
 * window, discarding windows wider than @thd_up (merged) or narrower
 * than @thd_low (bubble). On success the tap is set to the middle of
 * that window; if only a single edge was found, a tap @fixed_tap away
 * from it is used; with no edge at all the HW tuned tap is kept.
 */
static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;
	bool start_pass = false;
	bool end_pass = false;
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract good valid passing window by
	 * filtering out un-wanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		/* Select which status word to expose in TUN_STATUS0. */
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}

			bit++;
		}
	}

	if (!first_fail) {
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}
922 
/*
 * Inspect the HW tuning outcome and, when the detected passing window looks
 * unreliable (window touching the edges of the tuning range, or wider than
 * the theoretical upper bound), run SW tap correction before programming
 * the final tap value.
 */
static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain HW tuned tap to use incase if no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		/* curr_clk_rate presumably in Hz; derive clock period in ps */
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		/* taps per clock period for fastest and slowest tap delay */
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and Lower bound thresholds used to detect merged and
		 * bubble windows
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed tap is used when HW tuning result contains single edge
		 * and tap is set at fixed tap delay relative to the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		/* window detected by the HW tuning engine */
		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * partial window includes edges of the tuning range.
		 * merged window includes more taps so window width is higher
		 * than upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}
980 
981 static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
982 {
983 	struct sdhci_host *host = mmc_priv(mmc);
984 	int err;
985 
986 	err = sdhci_execute_tuning(mmc, opcode);
987 	if (!err && !host->tuning_err)
988 		tegra_sdhci_post_tuning(host);
989 
990 	return err;
991 }
992 
/*
 * Program signaling mode plus the Tegra vendor tuning parameters that depend
 * on it: tuning iteration count, tap/trim values, DQS trim and the HS400 DLL
 * calibration.
 */
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
					  unsigned timing)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	bool set_default_tap = false;
	bool set_dqs_trim = false;
	bool do_hs400_dll_cal = false;
	u8 iter = TRIES_256;
	u32 val;

	tegra_host->ddr_signaling = false;
	switch (timing) {
	case MMC_TIMING_UHS_SDR50:
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		/* Don't set default tap on tunable modes. */
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_HS400:
		/* HS400 additionally needs DQS trim and DLL calibration */
		set_dqs_trim = true;
		do_hs400_dll_cal = true;
		iter = TRIES_128;
		break;
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		tegra_host->ddr_signaling = true;
		set_default_tap = true;
		break;
	default:
		set_default_tap = true;
		break;
	}

	/* configure tuning iteration count, start tap 0, multiplier 1 */
	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);

	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;

	sdhci_set_uhs_signaling(host, timing);

	tegra_sdhci_pad_autocalib(host);

	/* reuse a previously tuned tap where available and applicable */
	if (tegra_host->tuned_tap_delay && !set_default_tap)
		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
	else
		tegra_sdhci_set_tap(host, tegra_host->default_tap);

	if (set_dqs_trim)
		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);

	if (do_hs400_dll_cal)
		tegra_sdhci_hs400_dll_cal(host);
}
1055 
1056 static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1057 {
1058 	unsigned int min, max;
1059 
1060 	/*
1061 	 * Start search for minimum tap value at 10, as smaller values are
1062 	 * may wrongly be reported as working but fail at higher speeds,
1063 	 * according to the TRM.
1064 	 */
1065 	min = 10;
1066 	while (min < 255) {
1067 		tegra_sdhci_set_tap(host, min);
1068 		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1069 			break;
1070 		min++;
1071 	}
1072 
1073 	/* Find the maximum tap value that still passes. */
1074 	max = min + 1;
1075 	while (max < 255) {
1076 		tegra_sdhci_set_tap(host, max);
1077 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1078 			max--;
1079 			break;
1080 		}
1081 		max++;
1082 	}
1083 
1084 	/* The TRM states the ideal tap value is at 75% in the passing range. */
1085 	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1086 
1087 	return mmc_send_tuning(host->mmc, opcode, NULL);
1088 }
1089 
1090 static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1091 						   struct mmc_ios *ios)
1092 {
1093 	struct sdhci_host *host = mmc_priv(mmc);
1094 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1095 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1096 	int ret = 0;
1097 
1098 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1099 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1100 		if (ret < 0)
1101 			return ret;
1102 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1103 	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1104 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1105 		if (ret < 0)
1106 			return ret;
1107 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1108 	}
1109 
1110 	if (tegra_host->pad_calib_required)
1111 		tegra_sdhci_pad_autocalib(host);
1112 
1113 	return ret;
1114 }
1115 
1116 static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1117 					 struct sdhci_tegra *tegra_host)
1118 {
1119 	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1120 	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1121 		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1122 			PTR_ERR(tegra_host->pinctrl_sdmmc));
1123 		return -1;
1124 	}
1125 
1126 	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1127 				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1128 	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1129 		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1130 			tegra_host->pinctrl_state_1v8_drv = NULL;
1131 	}
1132 
1133 	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1134 				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1135 	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1136 		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1137 			tegra_host->pinctrl_state_3v3_drv = NULL;
1138 	}
1139 
1140 	tegra_host->pinctrl_state_3v3 =
1141 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1142 	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1143 		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1144 			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1145 		return -1;
1146 	}
1147 
1148 	tegra_host->pinctrl_state_1v8 =
1149 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1150 	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1151 		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1152 			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1153 		return -1;
1154 	}
1155 
1156 	tegra_host->pad_control_available = true;
1157 
1158 	return 0;
1159 }
1160 
1161 static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1162 {
1163 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1164 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1165 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1166 
1167 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1168 		tegra_host->pad_calib_required = true;
1169 }
1170 
/*
 * CQHCI register write hook that intercepts the CQE unhalt request so the
 * SDHCI transfer-mode/block registers can be reprogrammed while CQE is
 * still halted (Tegra blocks those writes once CQE is unhalted).
 */
static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
{
	struct mmc_host *mmc = cq_host->mmc;
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;
	ktime_t timeout;
	bool timed_out;

	/*
	 * During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
	 * cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
	 * to be re-configured.
	 * Tegra CQHCI/SDHCI prevents write access to block size register when
	 * CQE is unhalted. So handling CQE resume sequence here to configure
	 * SDHCI block registers prior to exiting CQE halt state.
	 */
	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		writel(val, cq_host->mmio + reg);
		/* poll up to 50us for the controller to leave the halt state */
		timeout = ktime_add_us(ktime_get(), 50);
		while (1) {
			timed_out = ktime_compare(ktime_get(), timeout) > 0;
			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
			if (!(ctrl & CQHCI_HALT) || timed_out)
				break;
		}
		/*
		 * CQE usually resumes very quick, but incase if Tegra CQE
		 * doesn't resume retry unhalt.
		 */
		if (timed_out)
			writel(val, cq_host->mmio + reg);
	} else {
		writel(val, cq_host->mmio + reg);
	}
}
1209 
1210 static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1211 					 struct mmc_request *mrq, u64 *data)
1212 {
1213 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1214 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1215 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1216 
1217 	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1218 	    mrq->cmd->flags & MMC_RSP_R1B)
1219 		*data |= CQHCI_CMD_TIMING(1);
1220 }
1221 
/*
 * CQHCI enable hook: temporarily drops CQHCI_ENABLE so SDHCI block registers
 * can be programmed (Tegra blocks those writes while CQE is enabled and
 * unhalted), then clears the send-status block counter to avoid CMD CRC
 * errors.
 */
static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	/*
	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
	 * register when CQE is enabled and unhalted.
	 * CQHCI driver enables CQE prior to activation, so disable CQE before
	 * programming block size in sdhci controller and enable it back.
	 */
	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
		sdhci_cqe_enable(mmc);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	/*
	 * CMD CRC errors are seen sometimes with some eMMC devices when status
	 * command is sent during transfer of last data block which is the
	 * default case as send status command block counter (CBC) is 1.
	 * Recommended fix to set CBC to 0 allowing send status command only
	 * when data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}
1256 
/* CQHCI debug hook: dump the SDHCI register set for this host. */
static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	sdhci_dumpregs(host);
}
1261 
1262 static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1263 {
1264 	int cmd_error = 0;
1265 	int data_error = 0;
1266 
1267 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1268 		return intmask;
1269 
1270 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1271 
1272 	return 0;
1273 }
1274 
1275 static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1276 				    struct mmc_command *cmd)
1277 {
1278 	u32 val;
1279 
1280 	/*
1281 	 * HW busy detection timeout is based on programmed data timeout
1282 	 * counter and maximum supported timeout is 11s which may not be
1283 	 * enough for long operations like cache flush, sleep awake, erase.
1284 	 *
1285 	 * ERASE_TIMEOUT_LIMIT bit of VENDOR_MISC_CTRL register allows
1286 	 * host controller to wait for busy state until the card is busy
1287 	 * without HW timeout.
1288 	 *
1289 	 * So, use infinite busy wait mode for operations that may take
1290 	 * more than maximum HW busy timeout of 11s otherwise use finite
1291 	 * busy wait mode.
1292 	 */
1293 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1294 	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1295 		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1296 	else
1297 		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1298 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1299 
1300 	__sdhci_set_timeout(host, cmd);
1301 }
1302 
1303 static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1304 {
1305 	struct cqhci_host *cq_host = mmc->cqe_private;
1306 	u32 reg;
1307 
1308 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1309 	reg |= CQHCI_ENABLE;
1310 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1311 }
1312 
1313 static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1314 {
1315 	struct cqhci_host *cq_host = mmc->cqe_private;
1316 	struct sdhci_host *host = mmc_priv(mmc);
1317 	u32 reg;
1318 
1319 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1320 	reg &= ~CQHCI_ENABLE;
1321 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1322 	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1323 }
1324 
/* CQHCI callbacks implementing the Tegra-specific CQE quirks above. */
static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l    = tegra_cqhci_writel,
	.enable	= sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
	.pre_enable = sdhci_tegra_cqe_pre_enable,
	.post_disable = sdhci_tegra_cqe_post_disable,
};
1334 
1335 static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1336 {
1337 	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1338 	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1339 	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1340 	struct device *dev = mmc_dev(host->mmc);
1341 
1342 	if (soc->dma_mask)
1343 		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1344 
1345 	return 0;
1346 }
1347 
/* SDHCI callbacks shared by Tegra20/Tegra30 (SW tap tuning). */
static const struct sdhci_ops tegra_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};
1361 
/* Tegra20 platform quirks. */
static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.ops  = &tegra_sdhci_ops,
};

/* Tegra20 SoC data: 32-bit DMA addressing. */
static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
	.pdata = &sdhci_tegra20_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
};
1378 
/* Tegra30 platform quirks. */
static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_BROKEN_HS200 |
		   /*
		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
		    * though no command operation was in progress."
		    *
		    * The exact reason is unknown, as the same hardware seems
		    * to support Auto CMD23 on a downstream 3.1 kernel.
		    */
		   SDHCI_QUIRK2_ACMD23_BROKEN,
	.ops  = &tegra_sdhci_ops,
};

/* Tegra30 SoC data: UHS modes enabled, pad calibration supported. */
static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
	.pdata = &sdhci_tegra30_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_ANDROID_GPT_SECTOR |
		    NVQUIRK_HAS_PADCALIB,
};
1408 
/* SDHCI callbacks for Tegra114/124; adds a 16-bit register write hook. */
static const struct sdhci_ops tegra114_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_w    = tegra_sdhci_writew,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.platform_execute_tuning = tegra_sdhci_execute_tuning,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
};

/* Tegra114 platform quirks. */
static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};

/* Tegra114 SoC data: 32-bit DMA addressing. */
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
	.pdata = &sdhci_tegra114_pdata,
	.dma_mask = DMA_BIT_MASK(32),
	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};
1440 
/* Tegra124 platform quirks (same op set as Tegra114). */
static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra114_sdhci_ops,
};

/* Tegra124 SoC data: 34-bit DMA addressing. */
static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
	.pdata = &sdhci_tegra124_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_HAS_ANDROID_GPT_SECTOR,
};
1457 
/*
 * SDHCI callbacks for Tegra210; HW tuning is used (no
 * platform_execute_tuning) and busy timeout handling is customized.
 */
static const struct sdhci_ops tegra210_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_w    = tegra210_sdhci_writew,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.set_timeout = tegra_sdhci_set_timeout,
};

/* Tegra210 platform quirks. */
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra210_sdhci_ops,
};

/* Tegra210 SoC data: tap delay bounds used by the tuning correction. */
static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
	.pdata = &sdhci_tegra210_pdata,
	.dma_mask = DMA_BIT_MASK(34),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};
1495 
/* SDHCI callbacks for Tegra186/194; adds the CQHCI interrupt hook. */
static const struct sdhci_ops tegra186_sdhci_ops = {
	.get_ro     = tegra_sdhci_get_ro,
	.read_w     = tegra_sdhci_readw,
	.write_l    = tegra_sdhci_writel,
	.set_clock  = tegra_sdhci_set_clock,
	.set_dma_mask = tegra_sdhci_set_dma_mask,
	.set_bus_width = sdhci_set_bus_width,
	.reset      = tegra_sdhci_reset,
	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
	.voltage_switch = tegra_sdhci_voltage_switch,
	.get_max_clock = tegra_sdhci_get_max_clock,
	.irq = sdhci_tegra_cqhci_irq,
	.set_timeout = tegra_sdhci_set_timeout,
};

/* Tegra186 platform quirks (also used by Tegra194). */
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_NO_HISPD_BIT |
		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops  = &tegra186_sdhci_ops,
};

/* Tegra186 SoC data: 40-bit DMA addressing, DCMD R1b timing quirk. */
static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(40),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};
1534 
/* Tegra194 SoC data: reuses the Tegra186 pdata, 39-bit DMA addressing. */
static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.dma_mask = DMA_BIT_MASK(39),
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_HAS_TMCLK,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};
1547 
/* Device tree match table, newest SoCs first. */
static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
	{}
};
MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1559 
/*
 * Register the SDHCI host.  When HW command queueing is enabled this sets
 * up the CQHCI engine between sdhci_setup_host() and __sdhci_add_host();
 * otherwise it reduces to a plain sdhci_add_host().
 */
static int sdhci_tegra_add_host(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	int ret;

	if (!tegra_host->enable_hwcq)
		return sdhci_add_host(host);

	/* v4 mode is enabled for the command queueing path */
	sdhci_enable_v4_mode(host);

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

	cq_host = devm_kzalloc(mmc_dev(host->mmc),
				sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* CQHCI registers live at a fixed offset within the SDHCI window */
	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
	cq_host->ops = &sdhci_tegra_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	if (dma64)
		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret)
		goto cleanup;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
1607 
1608 static int sdhci_tegra_probe(struct platform_device *pdev)
1609 {
1610 	const struct of_device_id *match;
1611 	const struct sdhci_tegra_soc_data *soc_data;
1612 	struct sdhci_host *host;
1613 	struct sdhci_pltfm_host *pltfm_host;
1614 	struct sdhci_tegra *tegra_host;
1615 	struct clk *clk;
1616 	int rc;
1617 
1618 	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1619 	if (!match)
1620 		return -EINVAL;
1621 	soc_data = match->data;
1622 
1623 	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1624 	if (IS_ERR(host))
1625 		return PTR_ERR(host);
1626 	pltfm_host = sdhci_priv(host);
1627 
1628 	tegra_host = sdhci_pltfm_priv(pltfm_host);
1629 	tegra_host->ddr_signaling = false;
1630 	tegra_host->pad_calib_required = false;
1631 	tegra_host->pad_control_available = false;
1632 	tegra_host->soc_data = soc_data;
1633 
1634 	if (soc_data->nvquirks & NVQUIRK_HAS_ANDROID_GPT_SECTOR)
1635 		host->mmc->caps2 |= MMC_CAP2_ALT_GPT_TEGRA;
1636 
1637 	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1638 		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1639 		if (rc == 0)
1640 			host->mmc_host_ops.start_signal_voltage_switch =
1641 				sdhci_tegra_start_signal_voltage_switch;
1642 	}
1643 
1644 	/* Hook to periodically rerun pad calibration */
1645 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1646 		host->mmc_host_ops.request = tegra_sdhci_request;
1647 
1648 	host->mmc_host_ops.hs400_enhanced_strobe =
1649 			tegra_sdhci_hs400_enhanced_strobe;
1650 
1651 	if (!host->ops->platform_execute_tuning)
1652 		host->mmc_host_ops.execute_tuning =
1653 				tegra_sdhci_execute_hw_tuning;
1654 
1655 	rc = mmc_of_parse(host->mmc);
1656 	if (rc)
1657 		goto err_parse_dt;
1658 
1659 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1660 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1661 
1662 	/* HW busy detection is supported, but R1B responses are required. */
1663 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1664 
1665 	tegra_sdhci_parse_dt(host);
1666 
1667 	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1668 							 GPIOD_OUT_HIGH);
1669 	if (IS_ERR(tegra_host->power_gpio)) {
1670 		rc = PTR_ERR(tegra_host->power_gpio);
1671 		goto err_power_req;
1672 	}
1673 
1674 	/*
1675 	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used for host
1676 	 * timeout clock and SW can choose TMCLK or SDCLK for hardware
1677 	 * data timeout through the bit USE_TMCLK_FOR_DATA_TIMEOUT of
1678 	 * the register SDHCI_TEGRA_VENDOR_SYS_SW_CTRL.
1679 	 *
1680 	 * USE_TMCLK_FOR_DATA_TIMEOUT bit default is set to 1 and SDMMC uses
1681 	 * 12Mhz TMCLK which is advertised in host capability register.
1682 	 * With TMCLK of 12Mhz provides maximum data timeout period that can
1683 	 * be achieved is 11s better than using SDCLK for data timeout.
1684 	 *
1685 	 * So, TMCLK is set to 12Mhz and kept enabled all the time on SoC's
1686 	 * supporting separate TMCLK.
1687 	 */
1688 
1689 	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1690 		clk = devm_clk_get(&pdev->dev, "tmclk");
1691 		if (IS_ERR(clk)) {
1692 			rc = PTR_ERR(clk);
1693 			if (rc == -EPROBE_DEFER)
1694 				goto err_power_req;
1695 
1696 			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1697 			clk = NULL;
1698 		}
1699 
1700 		clk_set_rate(clk, 12000000);
1701 		rc = clk_prepare_enable(clk);
1702 		if (rc) {
1703 			dev_err(&pdev->dev,
1704 				"failed to enable tmclk: %d\n", rc);
1705 			goto err_power_req;
1706 		}
1707 
1708 		tegra_host->tmclk = clk;
1709 	}
1710 
1711 	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1712 	if (IS_ERR(clk)) {
1713 		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1714 				   "failed to get clock\n");
1715 		goto err_clk_get;
1716 	}
1717 	clk_prepare_enable(clk);
1718 	pltfm_host->clk = clk;
1719 
1720 	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1721 							   "sdhci");
1722 	if (IS_ERR(tegra_host->rst)) {
1723 		rc = PTR_ERR(tegra_host->rst);
1724 		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1725 		goto err_rst_get;
1726 	}
1727 
1728 	rc = reset_control_assert(tegra_host->rst);
1729 	if (rc)
1730 		goto err_rst_get;
1731 
1732 	usleep_range(2000, 4000);
1733 
1734 	rc = reset_control_deassert(tegra_host->rst);
1735 	if (rc)
1736 		goto err_rst_get;
1737 
1738 	usleep_range(2000, 4000);
1739 
1740 	rc = sdhci_tegra_add_host(host);
1741 	if (rc)
1742 		goto err_add_host;
1743 
1744 	return 0;
1745 
1746 err_add_host:
1747 	reset_control_assert(tegra_host->rst);
1748 err_rst_get:
1749 	clk_disable_unprepare(pltfm_host->clk);
1750 err_clk_get:
1751 	clk_disable_unprepare(tegra_host->tmclk);
1752 err_power_req:
1753 err_parse_dt:
1754 	sdhci_pltfm_free(pdev);
1755 	return rc;
1756 }
1757 
/* Unregister the host, put the controller in reset and drop the clocks. */
static int sdhci_tegra_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_remove_host(host, 0);

	/* hold the controller in reset before the clocks go away */
	reset_control_assert(tegra_host->rst);
	usleep_range(2000, 4000);
	clk_disable_unprepare(pltfm_host->clk);
	clk_disable_unprepare(tegra_host->tmclk);

	sdhci_pltfm_free(pdev);

	return 0;
}
1775 
#ifdef CONFIG_PM_SLEEP
/*
 * Suspend: quiesce CQE first (when present), then the SDHCI host, then
 * gate the module clock.  CQE is resumed again if the host suspend fails.
 */
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		/* undo the CQE suspend so the host stays usable */
		cqhci_resume(host->mmc);
		return ret;
	}

	clk_disable_unprepare(pltfm_host->clk);
	return 0;
}
1798 
/*
 * Resume: re-enable the module clock, then the SDHCI host, then CQE (when
 * present), unwinding each step on failure.
 */
static int __maybe_unused sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	clk_disable_unprepare(pltfm_host->clk);
	return ret;
}
#endif
1828 
/* System sleep PM ops (no runtime PM). */
static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
			 sdhci_tegra_resume);

static struct platform_driver sdhci_tegra_driver = {
	.driver		= {
		.name	= "sdhci-tegra",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_tegra_dt_match,
		.pm	= &sdhci_tegra_dev_pm_ops,
	},
	.probe		= sdhci_tegra_probe,
	.remove		= sdhci_tegra_remove,
};

module_platform_driver(sdhci_tegra_driver);

MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL v2");
1848