1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Freescale eSDHC controller driver.
4  *
5  * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6  * Copyright (c) 2009 MontaVista Software, Inc.
7  * Copyright 2020 NXP
8  *
9  * Authors: Xiaobo Xie <X.Xie@freescale.com>
10  *	    Anton Vorontsov <avorontsov@ru.mvista.com>
11  */
12 
13 #include <linux/err.h>
14 #include <linux/io.h>
15 #include <linux/of.h>
16 #include <linux/of_address.h>
17 #include <linux/delay.h>
18 #include <linux/module.h>
19 #include <linux/sys_soc.h>
20 #include <linux/clk.h>
21 #include <linux/ktime.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/iopoll.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/mmc.h>
26 #include "sdhci-pltfm.h"
27 #include "sdhci-esdhc.h"
28 
29 #define VENDOR_V_22	0x12
30 #define VENDOR_V_23	0x13
31 
32 #define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)
33 
/*
 * Per-SoC maximum clock fixup table.
 *
 * @sd_dflt_max_clk: max clock for an SD card in legacy (default) timing
 * @max_clk: per-timing-mode max clock; a zero entry means "no fixup"
 */
struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;
	const unsigned int max_clk[MMC_TIMING_NUM];
};

/* LS1021A: HS modes limited to 46.5 MHz. */
static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};

/* LS1043A: SDR104/HS200 limited to 116.7 MHz. */
static const struct esdhc_clk_fixup ls1043a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 116700000,
	.max_clk[MMC_TIMING_MMC_HS200] = 116700000,
};

/* LS1046A: SDR104/HS200 limited to 167 MHz. */
static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

/* LS1012A: SDR104/HS200 limited to 125 MHz. */
static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

/* P1010: conservative limits for legacy/HS modes. */
static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
69 
/*
 * OF match table. Entries carrying .data get the corresponding clock
 * fixup applied in esdhc_of_set_clock(); entries without .data do not.
 */
static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1043a-esdhc", .data = &ls1043a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
82 
/* Driver private data, stored in the sdhci_pltfm private area. */
struct sdhci_esdhc {
	u8 vendor_ver;		/* eSDHC vendor version (e.g. VENDOR_V_22/23) */
	u8 spec_ver;		/* SDHCI spec version reported by the IP */
	bool quirk_incorrect_hostver;		/* host version reg is wrong */
	bool quirk_limited_clk_division;	/* restrict HS400 divisors */
	bool quirk_unreliable_pulse_detection;	/* clear DLL pulse stretch */
	bool quirk_tuning_erratum_type1;	/* fixed tuning window */
	bool quirk_tuning_erratum_type2;	/* computed tuning window */
	bool quirk_ignore_data_inhibit;		/* mask SDHCI_DATA_INHIBIT */
	bool quirk_delay_before_data_reset;	/* delay before RSTD */
	bool quirk_trans_complete_erratum;	/* transfer-complete erratum */
	bool in_sw_tuning;	/* software tuning sequence in progress */
	unsigned int peripheral_clock;	/* optional peripheral clock rate */
	const struct esdhc_clk_fixup *clk_fixup;	/* per-SoC clock caps */
	u32 div_ratio;		/* currently programmed pre_div * div */
};
99 
100 /**
101  * esdhc_readl_fixup - Fixup the value read from incompatible eSDHC register
102  *		       to make it compatible with SD spec.
103  *
104  * @host: pointer to sdhci_host
105  * @spec_reg: SD spec register address
106  * @value: 32bit eSDHC register value on spec_reg address
107  *
108  * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
109  * registers are 32 bits. There are differences in register size, register
110  * address, register function, bit position and function between eSDHC spec
111  * and SD spec.
112  *
113  * Return a fixed up register value
114  */
115 static u32 esdhc_readl_fixup(struct sdhci_host *host,
116 				     int spec_reg, u32 value)
117 {
118 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
119 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
120 	u32 ret;
121 
122 	/*
123 	 * The bit of ADMA flag in eSDHC is not compatible with standard
124 	 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
125 	 * supported by eSDHC.
126 	 * And for many FSL eSDHC controller, the reset value of field
127 	 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
128 	 * only these vendor version is greater than 2.2/0x12 support ADMA.
129 	 */
130 	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
131 		if (esdhc->vendor_ver > VENDOR_V_22) {
132 			ret = value | SDHCI_CAN_DO_ADMA2;
133 			return ret;
134 		}
135 	}
136 	/*
137 	 * The DAT[3:0] line signal levels and the CMD line signal level are
138 	 * not compatible with standard SDHC register. The line signal levels
139 	 * DAT[7:0] are at bits 31:24 and the command line signal level is at
140 	 * bit 23. All other bits are the same as in the standard SDHC
141 	 * register.
142 	 */
143 	if (spec_reg == SDHCI_PRESENT_STATE) {
144 		ret = value & 0x000fffff;
145 		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
146 		ret |= (value << 1) & SDHCI_CMD_LVL;
147 		return ret;
148 	}
149 
150 	/*
151 	 * DTS properties of mmc host are used to enable each speed mode
152 	 * according to soc and board capability. So clean up
153 	 * SDR50/SDR104/DDR50 support bits here.
154 	 */
155 	if (spec_reg == SDHCI_CAPABILITIES_1) {
156 		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
157 				SDHCI_SUPPORT_DDR50);
158 		return ret;
159 	}
160 
161 	/*
162 	 * Some controllers have unreliable Data Line Active
163 	 * bit for commands with busy signal. This affects
164 	 * Command Inhibit (data) bit. Just ignore it since
165 	 * MMC core driver has already polled card status
166 	 * with CMD13 after any command with busy siganl.
167 	 */
168 	if ((spec_reg == SDHCI_PRESENT_STATE) &&
169 	(esdhc->quirk_ignore_data_inhibit == true)) {
170 		ret = value & ~SDHCI_DATA_INHIBIT;
171 		return ret;
172 	}
173 
174 	ret = value;
175 	return ret;
176 }
177 
178 static u16 esdhc_readw_fixup(struct sdhci_host *host,
179 				     int spec_reg, u32 value)
180 {
181 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
182 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
183 	u16 ret;
184 	int shift = (spec_reg & 0x2) * 8;
185 
186 	if (spec_reg == SDHCI_TRANSFER_MODE)
187 		return pltfm_host->xfer_mode_shadow;
188 
189 	if (spec_reg == SDHCI_HOST_VERSION)
190 		ret = value & 0xffff;
191 	else
192 		ret = (value >> shift) & 0xffff;
193 	/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
194 	 * vendor version and spec version information.
195 	 */
196 	if ((spec_reg == SDHCI_HOST_VERSION) &&
197 	    (esdhc->quirk_incorrect_hostver))
198 		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
199 	return ret;
200 }
201 
202 static u8 esdhc_readb_fixup(struct sdhci_host *host,
203 				     int spec_reg, u32 value)
204 {
205 	u8 ret;
206 	u8 dma_bits;
207 	int shift = (spec_reg & 0x3) * 8;
208 
209 	ret = (value >> shift) & 0xff;
210 
211 	/*
212 	 * "DMA select" locates at offset 0x28 in SD specification, but on
213 	 * P5020 or P3041, it locates at 0x29.
214 	 */
215 	if (spec_reg == SDHCI_HOST_CONTROL) {
216 		/* DMA select is 22,23 bits in Protocol Control Register */
217 		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
218 		/* fixup the result */
219 		ret &= ~SDHCI_CTRL_DMA_MASK;
220 		ret |= dma_bits;
221 	}
222 	return ret;
223 }
224 
225 /**
226  * esdhc_writel_fixup - Fixup the SD spec register value so that it could be
227  *			written into eSDHC register.
228  *
229  * @host: pointer to sdhci_host
230  * @spec_reg: SD spec register address
231  * @value: 8/16/32bit SD spec register value that would be written
232  * @old_value: 32bit eSDHC register value on spec_reg address
233  *
234  * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
235  * registers are 32 bits. There are differences in register size, register
236  * address, register function, bit position and function between eSDHC spec
237  * and SD spec.
238  *
239  * Return a fixed up register value
240  */
241 static u32 esdhc_writel_fixup(struct sdhci_host *host,
242 				     int spec_reg, u32 value, u32 old_value)
243 {
244 	u32 ret;
245 
246 	/*
247 	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
248 	 * when SYSCTL[RSTD] is set for some special operations.
249 	 * No any impact on other operation.
250 	 */
251 	if (spec_reg == SDHCI_INT_ENABLE)
252 		ret = value | SDHCI_INT_BLK_GAP;
253 	else
254 		ret = value;
255 
256 	return ret;
257 }
258 
259 static u32 esdhc_writew_fixup(struct sdhci_host *host,
260 				     int spec_reg, u16 value, u32 old_value)
261 {
262 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
263 	int shift = (spec_reg & 0x2) * 8;
264 	u32 ret;
265 
266 	switch (spec_reg) {
267 	case SDHCI_TRANSFER_MODE:
268 		/*
269 		 * Postpone this write, we must do it together with a
270 		 * command write that is down below. Return old value.
271 		 */
272 		pltfm_host->xfer_mode_shadow = value;
273 		return old_value;
274 	case SDHCI_COMMAND:
275 		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
276 		return ret;
277 	}
278 
279 	ret = old_value & (~(0xffff << shift));
280 	ret |= (value << shift);
281 
282 	if (spec_reg == SDHCI_BLOCK_SIZE) {
283 		/*
284 		 * Two last DMA bits are reserved, and first one is used for
285 		 * non-standard blksz of 4096 bytes that we don't support
286 		 * yet. So clear the DMA boundary bits.
287 		 */
288 		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
289 	}
290 	return ret;
291 }
292 
/*
 * Fix up an 8-bit SD spec register write so it can be merged into the
 * containing 32-bit eSDHC register. Returns the full 32-bit value to write.
 */
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
				     int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" location is offset 0x28 in SD specification, but on
	 * P5020 or P3041, it's located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If host control register is not standard, exit
		 * this function
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is 22,23 bits in Protocol Control Register */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		/* Place the new DMA-select bits at their eSDHC position. */
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		/* Keep the hardware's DMA bits in the low byte untouched. */
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}

	/* Generic case: merge the byte into the containing 32-bit word. */
	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}
334 
335 static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
336 {
337 	u32 ret;
338 	u32 value;
339 
340 	if (reg == SDHCI_CAPABILITIES_1)
341 		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
342 	else
343 		value = ioread32be(host->ioaddr + reg);
344 
345 	ret = esdhc_readl_fixup(host, reg, value);
346 
347 	return ret;
348 }
349 
350 static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
351 {
352 	u32 ret;
353 	u32 value;
354 
355 	if (reg == SDHCI_CAPABILITIES_1)
356 		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
357 	else
358 		value = ioread32(host->ioaddr + reg);
359 
360 	ret = esdhc_readl_fixup(host, reg, value);
361 
362 	return ret;
363 }
364 
365 static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
366 {
367 	u16 ret;
368 	u32 value;
369 	int base = reg & ~0x3;
370 
371 	value = ioread32be(host->ioaddr + base);
372 	ret = esdhc_readw_fixup(host, reg, value);
373 	return ret;
374 }
375 
376 static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
377 {
378 	u16 ret;
379 	u32 value;
380 	int base = reg & ~0x3;
381 
382 	value = ioread32(host->ioaddr + base);
383 	ret = esdhc_readw_fixup(host, reg, value);
384 	return ret;
385 }
386 
387 static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
388 {
389 	u8 ret;
390 	u32 value;
391 	int base = reg & ~0x3;
392 
393 	value = ioread32be(host->ioaddr + base);
394 	ret = esdhc_readb_fixup(host, reg, value);
395 	return ret;
396 }
397 
398 static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
399 {
400 	u8 ret;
401 	u32 value;
402 	int base = reg & ~0x3;
403 
404 	value = ioread32(host->ioaddr + base);
405 	ret = esdhc_readb_fixup(host, reg, value);
406 	return ret;
407 }
408 
409 static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
410 {
411 	u32 value;
412 
413 	value = esdhc_writel_fixup(host, reg, val, 0);
414 	iowrite32be(value, host->ioaddr + reg);
415 }
416 
417 static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
418 {
419 	u32 value;
420 
421 	value = esdhc_writel_fixup(host, reg, val, 0);
422 	iowrite32(value, host->ioaddr + reg);
423 }
424 
/* 16-bit write accessor (big-endian): read-modify-write the aligned word. */
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	/* TRANSFER_MODE writes are deferred until the command write. */
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32be(ret, host->ioaddr + base);
		}
	}
}
450 
/* 16-bit write accessor (little-endian): read-modify-write the aligned word. */
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	/* TRANSFER_MODE writes are deferred until the command write. */
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32(ret, host->ioaddr + base);
		}
	}
}
476 
477 static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
478 {
479 	int base = reg & ~0x3;
480 	u32 value;
481 	u32 ret;
482 
483 	value = ioread32be(host->ioaddr + base);
484 	ret = esdhc_writeb_fixup(host, reg, val, value);
485 	iowrite32be(ret, host->ioaddr + base);
486 }
487 
488 static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
489 {
490 	int base = reg & ~0x3;
491 	u32 value;
492 	u32 ret;
493 
494 	value = ioread32(host->ioaddr + base);
495 	ret = esdhc_writeb_fixup(host, reg, val, value);
496 	iowrite32(ret, host->ioaddr + base);
497 }
498 
/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
 * and Block Gap Event(IRQSTAT[BGE]) are also set.
 * For Continue, apply soft reset for data(SYSCTL[RSTD]);
 * and re-issue the entire read transaction from beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	/* Only vendor version 2.3 with both TC and BGE set is affected. */
	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	/* Clear the spurious ADMA error and resume at the next boundary. */
	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
531 
532 static int esdhc_of_enable_dma(struct sdhci_host *host)
533 {
534 	int ret;
535 	u32 value;
536 	struct device *dev = mmc_dev(host->mmc);
537 
538 	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
539 	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
540 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
541 		if (ret)
542 			return ret;
543 	}
544 
545 	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
546 
547 	if (of_dma_is_coherent(dev->of_node))
548 		value |= ESDHC_DMA_SNOOP;
549 	else
550 		value &= ~ESDHC_DMA_SNOOP;
551 
552 	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
553 	return 0;
554 }
555 
556 static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
557 {
558 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
559 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
560 
561 	if (esdhc->peripheral_clock)
562 		return esdhc->peripheral_clock;
563 	else
564 		return pltfm_host->clock;
565 }
566 
567 static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
568 {
569 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
570 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
571 	unsigned int clock;
572 
573 	if (esdhc->peripheral_clock)
574 		clock = esdhc->peripheral_clock;
575 	else
576 		clock = pltfm_host->clock;
577 	return clock / 256 / 16;
578 }
579 
/*
 * Gate or ungate the SD clock (and, on vendor version <= 2.2, the
 * internal IPG/HCK/PER clocks as well), then wait for clock stability
 * on controllers that have a stable bit.
 */
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	ktime_t timeout;
	u32 val, clk_en;

	clk_en = ESDHC_CLOCK_SDCLKEN;

	/*
	 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
	 * is 2.2 or lower.
	 */
	if (esdhc->vendor_ver <= VENDOR_V_22)
		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
			   ESDHC_CLOCK_PEREN);

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= clk_en;
	else
		val &= ~clk_en;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
	 * wait clock stable bit which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}
624 
/*
 * Request an asynchronous FIFO flush and poll until the hardware clears
 * the flush bit (or 20 ms elapse). Must be called with the clock gated.
 */
static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
	ktime_t timeout;
	u32 val;

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		/* Hardware clears the bit when the flush completes. */
		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
		      ESDHC_FLUSH_ASYNC_FIFO))
			break;
		if (timedout) {
			pr_err("%s: flushing asynchronous FIFO timeout.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}
650 
/*
 * Program the SD clock to the requested rate.
 *
 * Applies per-SoC clock fixups, computes the prescaler/divisor pair,
 * handles the limited-division quirk for HS400, and performs the
 * HS400 DLL enable/reset sequence when entering HS400 at 200 MHz.
 * The register update is done with the clock gated.
 */
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	unsigned int pre_div = 1, div = 1;
	unsigned int clock_fixup = 0;
	ktime_t timeout;
	u32 temp;

	/* clock == 0 means gate the clock entirely. */
	if (clock == 0) {
		host->mmc->actual_clock = 0;
		esdhc_clock_enable(host, false);
		return;
	}

	/* Start pre_div at 2 for vendor version < 2.3. */
	if (esdhc->vendor_ver < VENDOR_V_23)
		pre_div = 2;

	/* Fix clock value. */
	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
	else if (esdhc->clk_fixup)
		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

	if (clock_fixup == 0 || clock < clock_fixup)
		clock_fixup = clock;

	/* Calculate pre_div and div. */
	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
		div++;

	esdhc->div_ratio = pre_div * div;

	/* Limit clock division for HS400 200MHz clock for quirk. */
	if (esdhc->quirk_limited_clk_division &&
	    clock == MMC_HS200_MAX_DTR &&
	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
	     host->flags & SDHCI_HS400_TUNING)) {
		if (esdhc->div_ratio <= 4) {
			pre_div = 4;
			div = 1;
		} else if (esdhc->div_ratio <= 8) {
			pre_div = 4;
			div = 2;
		} else if (esdhc->div_ratio <= 12) {
			pre_div = 4;
			div = 3;
		} else {
			pr_warn("%s: using unsupported clock division.\n",
				mmc_hostname(host->mmc));
		}
		esdhc->div_ratio = pre_div * div;
	}

	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->mmc->actual_clock);

	/* Set clock division into register. */
	pre_div >>= 1;
	div--;

	esdhc_clock_enable(host, false);

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~ESDHC_CLOCK_MASK;
	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
		(pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
	 * wait clock stable bit which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}

	/* Additional setting for HS400. */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    clock == MMC_HS200_MAX_DTR) {
		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
		esdhc_clock_enable(host, true);

		/* Enable the DLL; select the high-frequency range at 200 MHz. */
		temp = sdhci_readl(host, ESDHC_DLLCFG0);
		temp |= ESDHC_DLL_ENABLE;
		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
			temp |= ESDHC_DLL_FREQ_SEL;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);

		/* Pulse the DLL reset. */
		temp |= ESDHC_DLL_RESET;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);
		udelay(1);
		temp &= ~ESDHC_DLL_RESET;
		sdhci_writel(host, temp, ESDHC_DLLCFG0);

		/* Wait max 20 ms */
		if (read_poll_timeout(sdhci_readl, temp,
				      temp & ESDHC_DLL_STS_SLV_LOCK,
				      10, 20000, false,
				      host, ESDHC_DLLSTAT0))
			pr_err("%s: timeout for delay chain lock.\n",
			       mmc_hostname(host->mmc));

		temp = sdhci_readl(host, ESDHC_TBCTL);
		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

		esdhc_clock_enable(host, false);
		esdhc_flush_async_fifo(host);
	}
	esdhc_clock_enable(host, true);
}
782 
783 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
784 {
785 	u32 ctrl;
786 
787 	ctrl = sdhci_readl(host, ESDHC_PROCTL);
788 	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
789 	switch (width) {
790 	case MMC_BUS_WIDTH_8:
791 		ctrl |= ESDHC_CTRL_8BITBUS;
792 		break;
793 
794 	case MMC_BUS_WIDTH_4:
795 		ctrl |= ESDHC_CTRL_4BITBUS;
796 		break;
797 
798 	default:
799 		break;
800 	}
801 
802 	sdhci_writel(host, ctrl, ESDHC_PROCTL);
803 }
804 
/*
 * Controller reset with eSDHC-specific workarounds: optional pre-reset
 * delay, bus-width and interrupt-register restore on old IP versions,
 * and manual cleanup of tuning/DLL bits after a full reset.
 */
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val, bus_width = 0;

	/*
	 * Add delay to make sure all the DMA transfers are finished
	 * for quirk.
	 */
	if (esdhc->quirk_delay_before_data_reset &&
	    (mask & SDHCI_RESET_DATA) &&
	    (host->flags & SDHCI_REQ_USE_DMA))
		mdelay(5);

	/*
	 * Save bus-width for eSDHC whose vendor version is 2.2
	 * or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
	}

	sdhci_reset(host, mask);

	/*
	 * Restore bus-width setting and interrupt registers for eSDHC
	 * whose vendor version is 2.2 or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		val |= bus_width;
		sdhci_writel(host, val, ESDHC_PROCTL);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	/*
	 * Some bits have to be cleaned manually for eSDHC whose spec
	 * version is higher than 3.0 for all reset.
	 */
	if ((mask & SDHCI_RESET_ALL) &&
	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
		/* Tuning block must be disabled after a full reset. */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		/*
		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
		 * 0 for quirk.
		 */
		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}
868 
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is a
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
/* SoCs whose SCFG block carries the SDHC IO VSEL control register. */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};
880 
881 /* SDHC IO VSEL control register definition */
882 #define SCFG_SDHCIOVSELCR	0x408
883 #define SDHCIOVSELCR_TGLEN	0x80000000
884 #define SDHCIOVSELCR_VSELVAL	0x60000000
885 #define SDHCIOVSELCR_SDHC_VS	0x00000001
886 
/*
 * Switch the IO signal voltage between 3.3V and 1.8V.
 *
 * For 1.8V, platforms with an SCFG SDHC IO VSEL register need the
 * documented toggle sequence (VSELVAL, switch, then SDHC_VS) around
 * setting PROCTL[VOLT_SEL]; otherwise setting the bit alone suffices.
 * Always returns 0 (unsupported voltages are silently ignored).
 */
static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct device_node *scfg_node;
	void __iomem *scfg_base = NULL;
	u32 sdhciovselcr;
	u32 val;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	val = sdhci_readl(host, ESDHC_PROCTL);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val &= ~ESDHC_VOLT_SEL;
		sdhci_writel(host, val, ESDHC_PROCTL);
		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
		if (scfg_node)
			scfg_base = of_iomap(scfg_node, 0);
		of_node_put(scfg_node);
		if (scfg_base) {
			/* Select the 1.8V value and enable the toggle. */
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_VSELVAL;
			iowrite32be(sdhciovselcr,
				scfg_base + SCFG_SDHCIOVSELCR);

			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
			/* Let the supply settle before handing over control. */
			mdelay(5);

			/* Hand voltage control back to the SDHC block. */
			sdhciovselcr = SDHCIOVSELCR_TGLEN |
				       SDHCIOVSELCR_SDHC_VS;
			iowrite32be(sdhciovselcr,
				scfg_base + SCFG_SDHCIOVSELCR);
			iounmap(scfg_base);
		} else {
			val |= ESDHC_VOLT_SEL;
			sdhci_writel(host, val, ESDHC_PROCTL);
		}
		return 0;
	default:
		return 0;
	}
}
939 
/* SoCs affected by the type-1 tuning erratum (fixed tuning window). */
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
	{ .family = "QorIQ T1023", },
	{ .family = "QorIQ T1040", },
	{ .family = "QorIQ T2080", },
	{ .family = "QorIQ LS1021A", },
	{ /* sentinel */ }
};

/* SoCs affected by the type-2 tuning erratum (computed tuning window). */
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
	{ .family = "QorIQ LS1012A", },
	{ .family = "QorIQ LS1043A", },
	{ .family = "QorIQ LS1046A", },
	{ .family = "QorIQ LS1080A", },
	{ .family = "QorIQ LS2080A", },
	{ .family = "QorIQ LA1575A", },
	{ /* sentinel */ }
};
957 
958 static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
959 {
960 	u32 val;
961 
962 	esdhc_clock_enable(host, false);
963 	esdhc_flush_async_fifo(host);
964 
965 	val = sdhci_readl(host, ESDHC_TBCTL);
966 	if (enable)
967 		val |= ESDHC_TB_EN;
968 	else
969 		val &= ~ESDHC_TB_EN;
970 	sdhci_writel(host, val, ESDHC_TBCTL);
971 
972 	esdhc_clock_enable(host, true);
973 }
974 
/*
 * Read back the hardware tuning window pointers from TBSTAT using the
 * register sequence prescribed by the reference manual.
 *
 * @window_start: out, TBSTAT[15:8]
 * @window_end: out, TBSTAT[7:0]
 */
static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	u32 val;

	/* Write TBCTL[11:8]=4'h8 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~(0xf << 8);
	val |= 8 << 8;
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read TBCTL[31:0] register and rewrite again */
	val = sdhci_readl(host, ESDHC_TBCTL);
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read the TBSTAT[31:0] register twice */
	val = sdhci_readl(host, ESDHC_TBSTAT);
	val = sdhci_readl(host, ESDHC_TBSTAT);

	*window_end = val & 0xff;
	*window_start = (val >> 8) & 0xff;
}
1001 
/*
 * Compute the software tuning window for the current divider ratio.
 *
 * Type-1 erratum parts always use a fixed 5x/3x window. Otherwise the
 * hardware window pointers are inspected; an implausibly wide window
 * selects the larger 8x/4x fallback per the erratum procedure.
 */
static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 start_ptr, end_ptr;

	if (esdhc->quirk_tuning_erratum_type1) {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
		return;
	}

	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

	/* Reset data lines by setting ESDHCCTL[RSTD] */
	sdhci_reset(host, SDHCI_RESET_DATA);
	/* Write 32'hFFFF_FFFF to IRQSTAT register */
	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
	 */

	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
		*window_start = 8 * esdhc->div_ratio;
		*window_end = 4 * esdhc->div_ratio;
	} else {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
	}
}
1036 
/*
 * Run one software tuning pass over the given window.
 *
 * Programs the window pointers into TBPTR, selects software tuning mode
 * in TBCTL, and runs the standard SDHCI tuning procedure with
 * esdhc->in_sw_tuning set so the writew accessors apply the SMPCLKSEL
 * timing workaround. Returns the result of sdhci_execute_tuning().
 */
static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
				   u8 window_start, u8 window_end)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val;
	int ret;

	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
	      ESDHC_WNDW_STRT_PTR_MASK;
	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
	sdhci_writel(host, val, ESDHC_TBPTR);

	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~ESDHC_TB_MODE_MASK;
	val |= ESDHC_TB_MODE_SW;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc->in_sw_tuning = true;
	ret = sdhci_execute_tuning(mmc, opcode);
	esdhc->in_sw_tuning = false;
	return ret;
}
1063 
/*
 * Tuning entry point (wired to mmc_host_ops.execute_tuning in probe).
 *
 * Tries HW tuning first.  On platforms affected by the tuning erratum
 * (type1/type2) a tuning result of -EAGAIN triggers a SW-tuning
 * fallback, and one further retry of the whole sequence with a reduced
 * clock.  Returns 0 on success, negative errno otherwise.
 */
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 window_start, window_end;
	int ret, retries = 1;
	bool hs400_tuning;
	unsigned int clk;
	u32 val;

	/* For tuning mode, the sd clock divisor value
	 * must be larger than 3 according to reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	esdhc_tuning_block_enable(host, true);

	/*
	 * The eSDHC controller takes the data timeout value into account
	 * during tuning. If the SD card is too slow sending the response, the
	 * timer will expire and a "Buffer Read Ready" interrupt without data
	 * is triggered. This leads to tuning errors.
	 *
	 * Just set the timeout to the maximum value because the core will
	 * already take care of it in sdhci_send_tuning().
	 */
	sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

	/* Snapshot the HS400 tuning flag; it is restored before each retry. */
	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	do {
		if (esdhc->quirk_limited_clk_division &&
		    hs400_tuning)
			esdhc_of_set_clock(host, host->clock);

		/* Do HW tuning */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_MODE_MASK;
		val |= ESDHC_TB_MODE_3;
		sdhci_writel(host, val, ESDHC_TBCTL);

		ret = sdhci_execute_tuning(mmc, opcode);
		if (ret)
			break;

		/* For type2 affected platforms of the tuning erratum,
		 * tuning may succeed although eSDHC might not have
		 * tuned properly. Need to check tuning window.
		 */
		if (esdhc->quirk_tuning_erratum_type2 &&
		    !host->tuning_err) {
			esdhc_tuning_window_ptr(host, &window_start,
						&window_end);
			/* A window wider than (4 * div_ratio + 2) is invalid. */
			if (abs(window_start - window_end) >
			    (4 * esdhc->div_ratio + 2))
				host->tuning_err = -EAGAIN;
		}

		/* If HW tuning fails and triggers erratum,
		 * try workaround.
		 */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		/* Tuning failed: turn the tuning block back off. */
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		/* Tuning succeeded for HS400: set FLW_CTL_BG in SDTIMNGCTL. */
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}
1175 
/*
 * Program signaling for the requested timing, first tearing down any
 * HS400-specific controller state left behind by a previous mode.
 */
static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				   unsigned int timing)
{
	u32 val;

	/*
	 * There are specific registers setting for HS400 mode.
	 * Clean all of them if controller is in HS400 mode to
	 * exit HS400 mode before re-setting any speed mode.
	 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	if (val & ESDHC_HS400_MODE) {
		/* Clear FLW_CTL_BG (set after successful HS400 tuning). */
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val &= ~ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);

		/* Clear the command clock control bit. */
		val = sdhci_readl(host, ESDHC_SDCLKCTL);
		val &= ~ESDHC_CMD_CLK_CTL;
		sdhci_writel(host, val, ESDHC_SDCLKCTL);

		/* TBCTL[HS400_MODE] is changed with the SD clock gated. */
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_MODE;
		sdhci_writel(host, val, ESDHC_TBCTL);
		esdhc_clock_enable(host, true);

		/* Disable the DLL used for HS400. */
		val = sdhci_readl(host, ESDHC_DLLCFG0);
		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
		sdhci_writel(host, val, ESDHC_DLLCFG0);

		/* Drop the HS400 window adjustment. */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_WNDW_ADJUST;
		sdhci_writel(host, val, ESDHC_TBCTL);

		esdhc_tuning_block_enable(host, false);
	}

	/*
	 * For HS400 only the tuning block is enabled here; all other
	 * timings use the generic sdhci UHS programming.
	 */
	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}
1218 
1219 static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1220 {
1221 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1222 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1223 	u32 command;
1224 
1225 	if (esdhc->quirk_trans_complete_erratum) {
1226 		command = SDHCI_GET_CMD(sdhci_readw(host,
1227 					SDHCI_COMMAND));
1228 		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1229 				sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1230 				intmask & SDHCI_INT_DATA_END) {
1231 			intmask &= ~SDHCI_INT_DATA_END;
1232 			sdhci_writel(host, SDHCI_INT_DATA_END,
1233 					SDHCI_INT_STATUS);
1234 		}
1235 	}
1236 	return intmask;
1237 }
1238 
1239 #ifdef CONFIG_PM_SLEEP
/* Host control register contents saved at suspend, restored in esdhc_of_resume(). */
static u32 esdhc_proctl;
/* System suspend: save host control state, then suspend the sdhci host. */
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	/* Mark retuning as needed unless the host uses tuning mode 3. */
	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}
1252 
1253 static int esdhc_of_resume(struct device *dev)
1254 {
1255 	struct sdhci_host *host = dev_get_drvdata(dev);
1256 	int ret = sdhci_resume_host(host);
1257 
1258 	if (ret == 0) {
1259 		/* Isn't this already done by sdhci_resume_host() ? --rmk */
1260 		esdhc_of_enable_dma(host);
1261 		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1262 	}
1263 	return ret;
1264 }
1265 #endif
1266 
/* System sleep PM ops; suspend/resume callbacks are no-ops without CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			esdhc_of_suspend,
			esdhc_of_resume);
1270 
/* sdhci callbacks for big-endian eSDHC register layouts. */
static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
1288 
/* sdhci callbacks for little-endian eSDHC register layouts. */
static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
1306 
/*
 * Platform data for big-endian controllers.  PPC builds additionally
 * carry the broken-card-detection quirk.
 */
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};
1316 
/* Platform data for little-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_pdata.ops == NULL ? NULL : &sdhci_esdhc_le_ops,
};
1323 
/* SoC revisions that get quirk_incorrect_hostver set in esdhc_init(). */
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ /* sentinel */ }
};
1329 
/* SoC revisions that get quirk_limited_clk_division set in esdhc_init(). */
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ /* sentinel */ }
};
1336 
/* SoC revisions that get quirk_unreliable_pulse_detection set in esdhc_init(). */
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ /* sentinel */ }
};
1343 
1344 static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
1345 {
1346 	const struct of_device_id *match;
1347 	struct sdhci_pltfm_host *pltfm_host;
1348 	struct sdhci_esdhc *esdhc;
1349 	struct device_node *np;
1350 	struct clk *clk;
1351 	u32 val;
1352 	u16 host_ver;
1353 
1354 	pltfm_host = sdhci_priv(host);
1355 	esdhc = sdhci_pltfm_priv(pltfm_host);
1356 
1357 	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
1358 	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
1359 			     SDHCI_VENDOR_VER_SHIFT;
1360 	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
1361 	if (soc_device_match(soc_incorrect_hostver))
1362 		esdhc->quirk_incorrect_hostver = true;
1363 	else
1364 		esdhc->quirk_incorrect_hostver = false;
1365 
1366 	if (soc_device_match(soc_fixup_sdhc_clkdivs))
1367 		esdhc->quirk_limited_clk_division = true;
1368 	else
1369 		esdhc->quirk_limited_clk_division = false;
1370 
1371 	if (soc_device_match(soc_unreliable_pulse_detection))
1372 		esdhc->quirk_unreliable_pulse_detection = true;
1373 	else
1374 		esdhc->quirk_unreliable_pulse_detection = false;
1375 
1376 	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
1377 	if (match)
1378 		esdhc->clk_fixup = match->data;
1379 	np = pdev->dev.of_node;
1380 
1381 	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1382 		esdhc->quirk_delay_before_data_reset = true;
1383 		esdhc->quirk_trans_complete_erratum = true;
1384 	}
1385 
1386 	clk = of_clk_get(np, 0);
1387 	if (!IS_ERR(clk)) {
1388 		/*
1389 		 * esdhc->peripheral_clock would be assigned with a value
1390 		 * which is eSDHC base clock when use periperal clock.
1391 		 * For some platforms, the clock value got by common clk
1392 		 * API is peripheral clock while the eSDHC base clock is
1393 		 * 1/2 peripheral clock.
1394 		 */
1395 		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
1396 		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
1397 		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
1398 			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
1399 		else
1400 			esdhc->peripheral_clock = clk_get_rate(clk);
1401 
1402 		clk_put(clk);
1403 	}
1404 
1405 	esdhc_clock_enable(host, false);
1406 	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
1407 	/*
1408 	 * This bit is not able to be reset by SDHCI_RESET_ALL. Need to
1409 	 * initialize it as 1 or 0 once, to override the different value
1410 	 * which may be configured in bootloader.
1411 	 */
1412 	if (esdhc->peripheral_clock)
1413 		val |= ESDHC_PERIPHERAL_CLK_SEL;
1414 	else
1415 		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
1416 	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
1417 	esdhc_clock_enable(host, true);
1418 }
1419 
1420 static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1421 {
1422 	esdhc_tuning_block_enable(mmc_priv(mmc), false);
1423 	return 0;
1424 }
1425 
1426 static int sdhci_esdhc_probe(struct platform_device *pdev)
1427 {
1428 	struct sdhci_host *host;
1429 	struct device_node *np, *tp;
1430 	struct sdhci_pltfm_host *pltfm_host;
1431 	struct sdhci_esdhc *esdhc;
1432 	int ret;
1433 
1434 	np = pdev->dev.of_node;
1435 
1436 	if (of_property_read_bool(np, "little-endian"))
1437 		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1438 					sizeof(struct sdhci_esdhc));
1439 	else
1440 		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1441 					sizeof(struct sdhci_esdhc));
1442 
1443 	if (IS_ERR(host))
1444 		return PTR_ERR(host);
1445 
1446 	host->mmc_host_ops.start_signal_voltage_switch =
1447 		esdhc_signal_voltage_switch;
1448 	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1449 	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1450 	host->tuning_delay = 1;
1451 
1452 	esdhc_init(pdev, host);
1453 
1454 	sdhci_get_of_property(pdev);
1455 
1456 	pltfm_host = sdhci_priv(host);
1457 	esdhc = sdhci_pltfm_priv(pltfm_host);
1458 	if (soc_device_match(soc_tuning_erratum_type1))
1459 		esdhc->quirk_tuning_erratum_type1 = true;
1460 	else
1461 		esdhc->quirk_tuning_erratum_type1 = false;
1462 
1463 	if (soc_device_match(soc_tuning_erratum_type2))
1464 		esdhc->quirk_tuning_erratum_type2 = true;
1465 	else
1466 		esdhc->quirk_tuning_erratum_type2 = false;
1467 
1468 	if (esdhc->vendor_ver == VENDOR_V_22)
1469 		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1470 
1471 	if (esdhc->vendor_ver > VENDOR_V_22)
1472 		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1473 
1474 	tp = of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc");
1475 	if (tp) {
1476 		of_node_put(tp);
1477 		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1478 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1479 	}
1480 
1481 	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1482 	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1483 	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1484 	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1485 	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
1486 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1487 
1488 	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1489 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1490 
1491 	esdhc->quirk_ignore_data_inhibit = false;
1492 	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1493 		/*
1494 		 * Freescale messed up with P2020 as it has a non-standard
1495 		 * host control register
1496 		 */
1497 		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1498 		esdhc->quirk_ignore_data_inhibit = true;
1499 	}
1500 
1501 	/* call to generic mmc_of_parse to support additional capabilities */
1502 	ret = mmc_of_parse(host->mmc);
1503 	if (ret)
1504 		goto err;
1505 
1506 	mmc_of_parse_voltage(host->mmc, &host->ocr_mask);
1507 
1508 	ret = sdhci_add_host(host);
1509 	if (ret)
1510 		goto err;
1511 
1512 	return 0;
1513  err:
1514 	sdhci_pltfm_free(pdev);
1515 	return ret;
1516 }
1517 
/* OF platform driver; PM callbacks come from esdhc_of_dev_pm_ops above. */
static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};
1528 
1529 module_platform_driver(sdhci_esdhc_driver);
1530 
1531 MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
1532 MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
1533 	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
1534 MODULE_LICENSE("GPL v2");
1535