1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Freescale eSDHC controller driver.
4  *
5  * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
6  * Copyright (c) 2009 MontaVista Software, Inc.
7  *
8  * Authors: Xiaobo Xie <X.Xie@freescale.com>
9  *	    Anton Vorontsov <avorontsov@ru.mvista.com>
10  */
11 
12 #include <linux/err.h>
13 #include <linux/io.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/delay.h>
17 #include <linux/module.h>
18 #include <linux/sys_soc.h>
19 #include <linux/clk.h>
20 #include <linux/ktime.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/mmc/host.h>
23 #include <linux/mmc/mmc.h>
24 #include "sdhci-pltfm.h"
25 #include "sdhci-esdhc.h"
26 
27 #define VENDOR_V_22	0x12
28 #define VENDOR_V_23	0x13
29 
/* Number of MMC timing modes; sized so max_clk[] can be indexed by timing. */
#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)

/* Per-SoC clock ceilings used to cap the requested SD clock. */
struct esdhc_clk_fixup {
	const unsigned int sd_dflt_max_clk;	/* cap for SD cards in legacy timing */
	const unsigned int max_clk[MMC_TIMING_NUM];	/* per-timing cap; 0 = no cap */
};
36 
/* LS1021A: limit HS modes to 46.5 MHz, SD default-speed to 25 MHz. */
static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_MMC_HS] = 46500000,
	.max_clk[MMC_TIMING_SD_HS] = 46500000,
};
42 
/* LS1046A: limit SDR104/HS200 to 167 MHz, SD default-speed to 25 MHz. */
static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};
48 
/* LS1012A: limit SDR104/HS200 to 125 MHz, SD default-speed to 25 MHz. */
static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
	.sd_dflt_max_clk = 25000000,
	.max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
	.max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};
54 
/* P1010: conservative caps for legacy (20 MHz) and high-speed modes. */
static const struct esdhc_clk_fixup p1010_esdhc_clk = {
	.sd_dflt_max_clk = 20000000,
	.max_clk[MMC_TIMING_LEGACY] = 20000000,
	.max_clk[MMC_TIMING_MMC_HS] = 42000000,
	.max_clk[MMC_TIMING_SD_HS] = 40000000,
};
61 
/*
 * Device-tree match table. Entries with .data carry SoC-specific clock
 * fixups; the generic compatibles match with no fixup applied.
 */
static const struct of_device_id sdhci_esdhc_of_match[] = {
	{ .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
	{ .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
	{ .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
	{ .compatible = "fsl,p1010-esdhc",   .data = &p1010_esdhc_clk},
	{ .compatible = "fsl,mpc8379-esdhc" },
	{ .compatible = "fsl,mpc8536-esdhc" },
	{ .compatible = "fsl,esdhc" },
	{ }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
73 
/* Driver-private state, stored in the sdhci_pltfm private area. */
struct sdhci_esdhc {
	u8 vendor_ver;		/* eSDHC vendor version (e.g. VENDOR_V_22/23) */
	u8 spec_ver;		/* SDHCI spec version reported by the controller */
	bool quirk_incorrect_hostver;		/* HOST_VERSION register lies */
	bool quirk_limited_clk_division;	/* restrict HS400 divisor choices */
	bool quirk_unreliable_pulse_detection;	/* clear DLL pulse-stretch on reset */
	bool quirk_tuning_erratum_type1;	/* tuning erratum, fixed-window variant */
	bool quirk_tuning_erratum_type2;	/* tuning erratum, window-check variant */
	bool quirk_ignore_data_inhibit;		/* mask unreliable DATA_INHIBIT bit */
	bool quirk_delay_before_data_reset;	/* delay before SDHCI_RESET_DATA */
	bool in_sw_tuning;	/* software tuning sequence in progress */
	unsigned int peripheral_clock;	/* dedicated peripheral clock rate, 0 if none */
	const struct esdhc_clk_fixup *clk_fixup;	/* per-SoC clock caps, may be NULL */
	u32 div_ratio;		/* current pre_div * div clock division */
};
89 
90 /**
91  * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
92  *		       to make it compatible with SD spec.
93  *
94  * @host: pointer to sdhci_host
95  * @spec_reg: SD spec register address
96  * @value: 32bit eSDHC register value on spec_reg address
97  *
98  * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
99  * registers are 32 bits. There are differences in register size, register
100  * address, register function, bit position and function between eSDHC spec
101  * and SD spec.
102  *
103  * Return a fixed up register value
104  */
105 static u32 esdhc_readl_fixup(struct sdhci_host *host,
106 				     int spec_reg, u32 value)
107 {
108 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
109 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
110 	u32 ret;
111 
112 	/*
113 	 * The bit of ADMA flag in eSDHC is not compatible with standard
114 	 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
115 	 * supported by eSDHC.
116 	 * And for many FSL eSDHC controller, the reset value of field
117 	 * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
118 	 * only these vendor version is greater than 2.2/0x12 support ADMA.
119 	 */
120 	if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
121 		if (esdhc->vendor_ver > VENDOR_V_22) {
122 			ret = value | SDHCI_CAN_DO_ADMA2;
123 			return ret;
124 		}
125 	}
126 	/*
127 	 * The DAT[3:0] line signal levels and the CMD line signal level are
128 	 * not compatible with standard SDHC register. The line signal levels
129 	 * DAT[7:0] are at bits 31:24 and the command line signal level is at
130 	 * bit 23. All other bits are the same as in the standard SDHC
131 	 * register.
132 	 */
133 	if (spec_reg == SDHCI_PRESENT_STATE) {
134 		ret = value & 0x000fffff;
135 		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
136 		ret |= (value << 1) & SDHCI_CMD_LVL;
137 		return ret;
138 	}
139 
140 	/*
141 	 * DTS properties of mmc host are used to enable each speed mode
142 	 * according to soc and board capability. So clean up
143 	 * SDR50/SDR104/DDR50 support bits here.
144 	 */
145 	if (spec_reg == SDHCI_CAPABILITIES_1) {
146 		ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
147 				SDHCI_SUPPORT_DDR50);
148 		return ret;
149 	}
150 
151 	/*
152 	 * Some controllers have unreliable Data Line Active
153 	 * bit for commands with busy signal. This affects
154 	 * Command Inhibit (data) bit. Just ignore it since
155 	 * MMC core driver has already polled card status
156 	 * with CMD13 after any command with busy siganl.
157 	 */
158 	if ((spec_reg == SDHCI_PRESENT_STATE) &&
159 	(esdhc->quirk_ignore_data_inhibit == true)) {
160 		ret = value & ~SDHCI_DATA_INHIBIT;
161 		return ret;
162 	}
163 
164 	ret = value;
165 	return ret;
166 }
167 
168 static u16 esdhc_readw_fixup(struct sdhci_host *host,
169 				     int spec_reg, u32 value)
170 {
171 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
172 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
173 	u16 ret;
174 	int shift = (spec_reg & 0x2) * 8;
175 
176 	if (spec_reg == SDHCI_HOST_VERSION)
177 		ret = value & 0xffff;
178 	else
179 		ret = (value >> shift) & 0xffff;
180 	/* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
181 	 * vendor version and spec version information.
182 	 */
183 	if ((spec_reg == SDHCI_HOST_VERSION) &&
184 	    (esdhc->quirk_incorrect_hostver))
185 		ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
186 	return ret;
187 }
188 
189 static u8 esdhc_readb_fixup(struct sdhci_host *host,
190 				     int spec_reg, u32 value)
191 {
192 	u8 ret;
193 	u8 dma_bits;
194 	int shift = (spec_reg & 0x3) * 8;
195 
196 	ret = (value >> shift) & 0xff;
197 
198 	/*
199 	 * "DMA select" locates at offset 0x28 in SD specification, but on
200 	 * P5020 or P3041, it locates at 0x29.
201 	 */
202 	if (spec_reg == SDHCI_HOST_CONTROL) {
203 		/* DMA select is 22,23 bits in Protocol Control Register */
204 		dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
205 		/* fixup the result */
206 		ret &= ~SDHCI_CTRL_DMA_MASK;
207 		ret |= dma_bits;
208 	}
209 	return ret;
210 }
211 
212 /**
213  * esdhc_write*_fixup - Fixup the SD spec register value so that it could be
214  *			written into eSDHC register.
215  *
216  * @host: pointer to sdhci_host
217  * @spec_reg: SD spec register address
218  * @value: 8/16/32bit SD spec register value that would be written
219  * @old_value: 32bit eSDHC register value on spec_reg address
220  *
221  * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
222  * registers are 32 bits. There are differences in register size, register
223  * address, register function, bit position and function between eSDHC spec
224  * and SD spec.
225  *
226  * Return a fixed up register value
227  */
228 static u32 esdhc_writel_fixup(struct sdhci_host *host,
229 				     int spec_reg, u32 value, u32 old_value)
230 {
231 	u32 ret;
232 
233 	/*
234 	 * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
235 	 * when SYSCTL[RSTD] is set for some special operations.
236 	 * No any impact on other operation.
237 	 */
238 	if (spec_reg == SDHCI_INT_ENABLE)
239 		ret = value | SDHCI_INT_BLK_GAP;
240 	else
241 		ret = value;
242 
243 	return ret;
244 }
245 
246 static u32 esdhc_writew_fixup(struct sdhci_host *host,
247 				     int spec_reg, u16 value, u32 old_value)
248 {
249 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
250 	int shift = (spec_reg & 0x2) * 8;
251 	u32 ret;
252 
253 	switch (spec_reg) {
254 	case SDHCI_TRANSFER_MODE:
255 		/*
256 		 * Postpone this write, we must do it together with a
257 		 * command write that is down below. Return old value.
258 		 */
259 		pltfm_host->xfer_mode_shadow = value;
260 		return old_value;
261 	case SDHCI_COMMAND:
262 		ret = (value << 16) | pltfm_host->xfer_mode_shadow;
263 		return ret;
264 	}
265 
266 	ret = old_value & (~(0xffff << shift));
267 	ret |= (value << shift);
268 
269 	if (spec_reg == SDHCI_BLOCK_SIZE) {
270 		/*
271 		 * Two last DMA bits are reserved, and first one is used for
272 		 * non-standard blksz of 4096 bytes that we don't support
273 		 * yet. So clear the DMA boundary bits.
274 		 */
275 		ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
276 	}
277 	return ret;
278 }
279 
/*
 * Translate an 8-bit SD-spec register write into the 32-bit eSDHC word.
 * Returns the full 32-bit value to write back at the aligned address.
 */
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
				     int spec_reg, u8 value, u32 old_value)
{
	u32 ret;
	u32 dma_bits;
	u8 tmp;
	int shift = (spec_reg & 0x3) * 8;

	/*
	 * eSDHC doesn't have a standard power control register, so we do
	 * nothing here to avoid incorrect operation.
	 */
	if (spec_reg == SDHCI_POWER_CONTROL)
		return old_value;
	/*
	 * "DMA select" location is offset 0x28 in SD specification, but on
	 * P5020 or P3041, it's located at 0x29.
	 */
	if (spec_reg == SDHCI_HOST_CONTROL) {
		/*
		 * If host control register is not standard, exit
		 * this function
		 */
		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
			return old_value;

		/* DMA select is 22,23 bits in Protocol Control Register */
		/* Move the SD-spec DMA field (bits 3,4) up to bits 8,9. */
		dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
		ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
		/* Keep the controller's current low-byte DMA bits intact. */
		tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
		      (old_value & SDHCI_CTRL_DMA_MASK);
		ret = (ret & (~0xff)) | tmp;

		/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
		ret &= ~ESDHC_HOST_CONTROL_RES;
		return ret;
	}

	/* Generic case: splice the byte into its position in the word. */
	ret = (old_value & (~(0xff << shift))) | (value << shift);
	return ret;
}
321 
322 static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
323 {
324 	u32 ret;
325 	u32 value;
326 
327 	if (reg == SDHCI_CAPABILITIES_1)
328 		value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
329 	else
330 		value = ioread32be(host->ioaddr + reg);
331 
332 	ret = esdhc_readl_fixup(host, reg, value);
333 
334 	return ret;
335 }
336 
337 static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
338 {
339 	u32 ret;
340 	u32 value;
341 
342 	if (reg == SDHCI_CAPABILITIES_1)
343 		value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
344 	else
345 		value = ioread32(host->ioaddr + reg);
346 
347 	ret = esdhc_readl_fixup(host, reg, value);
348 
349 	return ret;
350 }
351 
352 static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
353 {
354 	u16 ret;
355 	u32 value;
356 	int base = reg & ~0x3;
357 
358 	value = ioread32be(host->ioaddr + base);
359 	ret = esdhc_readw_fixup(host, reg, value);
360 	return ret;
361 }
362 
363 static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
364 {
365 	u16 ret;
366 	u32 value;
367 	int base = reg & ~0x3;
368 
369 	value = ioread32(host->ioaddr + base);
370 	ret = esdhc_readw_fixup(host, reg, value);
371 	return ret;
372 }
373 
374 static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
375 {
376 	u8 ret;
377 	u32 value;
378 	int base = reg & ~0x3;
379 
380 	value = ioread32be(host->ioaddr + base);
381 	ret = esdhc_readb_fixup(host, reg, value);
382 	return ret;
383 }
384 
385 static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
386 {
387 	u8 ret;
388 	u32 value;
389 	int base = reg & ~0x3;
390 
391 	value = ioread32(host->ioaddr + base);
392 	ret = esdhc_readb_fixup(host, reg, value);
393 	return ret;
394 }
395 
396 static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
397 {
398 	u32 value;
399 
400 	value = esdhc_writel_fixup(host, reg, val, 0);
401 	iowrite32be(value, host->ioaddr + reg);
402 }
403 
404 static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
405 {
406 	u32 value;
407 
408 	value = esdhc_writel_fixup(host, reg, val, 0);
409 	iowrite32(value, host->ioaddr + reg);
410 }
411 
/*
 * 16-bit big-endian write via read-modify-write of the aligned 32-bit
 * word. SDHCI_TRANSFER_MODE writes are only shadowed (merged into the
 * later SDHCI_COMMAND write by esdhc_writew_fixup), so nothing hits the
 * hardware for them here.
 */
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32be(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32be(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32be(ret, host->ioaddr + base);
		}
	}
}
437 
/*
 * 16-bit little-endian write via read-modify-write of the aligned 32-bit
 * word. SDHCI_TRANSFER_MODE writes are only shadowed (merged into the
 * later SDHCI_COMMAND write by esdhc_writew_fixup), so nothing hits the
 * hardware for them here.
 */
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	int base = reg & ~0x3;
	u32 value;
	u32 ret;

	value = ioread32(host->ioaddr + base);
	ret = esdhc_writew_fixup(host, reg, val, value);
	if (reg != SDHCI_TRANSFER_MODE)
		iowrite32(ret, host->ioaddr + base);

	/* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
	 * 1us later after ESDHC_EXTN is set.
	 */
	if (base == ESDHC_SYSTEM_CONTROL_2) {
		if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
		    esdhc->in_sw_tuning) {
			udelay(1);
			ret |= ESDHC_SMPCLKSEL;
			iowrite32(ret, host->ioaddr + base);
		}
	}
}
463 
464 static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
465 {
466 	int base = reg & ~0x3;
467 	u32 value;
468 	u32 ret;
469 
470 	value = ioread32be(host->ioaddr + base);
471 	ret = esdhc_writeb_fixup(host, reg, val, value);
472 	iowrite32be(ret, host->ioaddr + base);
473 }
474 
475 static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
476 {
477 	int base = reg & ~0x3;
478 	u32 value;
479 	u32 ret;
480 
481 	value = ioread32(host->ioaddr + base);
482 	ret = esdhc_writeb_fixup(host, reg, val, value);
483 	iowrite32(ret, host->ioaddr + base);
484 }
485 
486 /*
487  * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
488  * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC])
489  * and Block Gap Event(IRQSTAT[BGE]) are also set.
490  * For Continue, apply soft reset for data(SYSCTL[RSTD]);
491  * and re-issue the entire read transaction from beginning.
492  */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	bool applicable;
	dma_addr_t dmastart;
	dma_addr_t dmanow;

	/* Only vendor version 2.3 with both TC and BGE set needs this. */
	applicable = (intmask & SDHCI_INT_DATA_END) &&
		     (intmask & SDHCI_INT_BLK_GAP) &&
		     (esdhc->vendor_ver == VENDOR_V_23);
	if (!applicable)
		return;

	/* Clear the spurious ADMA error and resume at the next boundary. */
	host->data->error = 0;
	dmastart = sg_dma_address(host->data->sg);
	dmanow = dmastart + host->data->bytes_xfered;
	/*
	 * Force update to the next DMA block boundary.
	 */
	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
		SDHCI_DEFAULT_BOUNDARY_SIZE;
	host->data->bytes_xfered = dmanow - dmastart;
	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}
518 
519 static int esdhc_of_enable_dma(struct sdhci_host *host)
520 {
521 	u32 value;
522 	struct device *dev = mmc_dev(host->mmc);
523 
524 	if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
525 	    of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
526 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
527 
528 	value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
529 
530 	if (of_dma_is_coherent(dev->of_node))
531 		value |= ESDHC_DMA_SNOOP;
532 	else
533 		value &= ~ESDHC_DMA_SNOOP;
534 
535 	sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
536 	return 0;
537 }
538 
539 static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
540 {
541 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
542 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
543 
544 	if (esdhc->peripheral_clock)
545 		return esdhc->peripheral_clock;
546 	else
547 		return pltfm_host->clock;
548 }
549 
550 static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
551 {
552 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
553 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
554 	unsigned int clock;
555 
556 	if (esdhc->peripheral_clock)
557 		clock = esdhc->peripheral_clock;
558 	else
559 		clock = pltfm_host->clock;
560 	return clock / 256 / 16;
561 }
562 
/*
 * Gate or ungate the SD card clock (and, on vendor version <= 2.2, the
 * internal IPG/HCK/PER clocks as well). When enabling on newer IP, poll
 * up to 20 ms for the clock-stable bit.
 */
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	ktime_t timeout;
	u32 val, clk_en;

	clk_en = ESDHC_CLOCK_SDCLKEN;

	/*
	 * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
	 * is 2.2 or lower.
	 */
	if (esdhc->vendor_ver <= VENDOR_V_22)
		clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
			   ESDHC_CLOCK_PEREN);

	val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

	if (enable)
		val |= clk_en;
	else
		val &= ~clk_en;

	sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

	/*
	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
	 * wait clock stable bit which does not exist.
	 */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (esdhc->vendor_ver > VENDOR_V_22) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}
607 
/*
 * Request an asynchronous FIFO flush via DMA_SYSCTL and poll (max 20 ms)
 * for the hardware to clear the flush bit, signalling completion.
 */
static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
	ktime_t timeout;
	u32 val;

	val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	val |= ESDHC_FLUSH_ASYNC_FIFO;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		/* Hardware clears the bit when the flush has finished. */
		if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
		      ESDHC_FLUSH_ASYNC_FIFO))
			break;
		if (timedout) {
			pr_err("%s: flushing asynchronous FIFO timeout.\n",
				mmc_hostname(host->mmc));
			break;
		}
		usleep_range(10, 20);
	}
}
633 
634 static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
635 {
636 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
637 	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
638 	unsigned int pre_div = 1, div = 1;
639 	unsigned int clock_fixup = 0;
640 	ktime_t timeout;
641 	u32 temp;
642 
643 	if (clock == 0) {
644 		host->mmc->actual_clock = 0;
645 		esdhc_clock_enable(host, false);
646 		return;
647 	}
648 
649 	/* Start pre_div at 2 for vendor version < 2.3. */
650 	if (esdhc->vendor_ver < VENDOR_V_23)
651 		pre_div = 2;
652 
653 	/* Fix clock value. */
654 	if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
655 	    esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
656 		clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
657 	else if (esdhc->clk_fixup)
658 		clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];
659 
660 	if (clock_fixup == 0 || clock < clock_fixup)
661 		clock_fixup = clock;
662 
663 	/* Calculate pre_div and div. */
664 	while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
665 		pre_div *= 2;
666 
667 	while (host->max_clk / pre_div / div > clock_fixup && div < 16)
668 		div++;
669 
670 	esdhc->div_ratio = pre_div * div;
671 
672 	/* Limit clock division for HS400 200MHz clock for quirk. */
673 	if (esdhc->quirk_limited_clk_division &&
674 	    clock == MMC_HS200_MAX_DTR &&
675 	    (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
676 	     host->flags & SDHCI_HS400_TUNING)) {
677 		if (esdhc->div_ratio <= 4) {
678 			pre_div = 4;
679 			div = 1;
680 		} else if (esdhc->div_ratio <= 8) {
681 			pre_div = 4;
682 			div = 2;
683 		} else if (esdhc->div_ratio <= 12) {
684 			pre_div = 4;
685 			div = 3;
686 		} else {
687 			pr_warn("%s: using unsupported clock division.\n",
688 				mmc_hostname(host->mmc));
689 		}
690 		esdhc->div_ratio = pre_div * div;
691 	}
692 
693 	host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;
694 
695 	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
696 		clock, host->mmc->actual_clock);
697 
698 	/* Set clock division into register. */
699 	pre_div >>= 1;
700 	div--;
701 
702 	esdhc_clock_enable(host, false);
703 
704 	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
705 	temp &= ~ESDHC_CLOCK_MASK;
706 	temp |= ((div << ESDHC_DIVIDER_SHIFT) |
707 		(pre_div << ESDHC_PREDIV_SHIFT));
708 	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
709 
710 	/*
711 	 * Wait max 20 ms. If vendor version is 2.2 or lower, do not
712 	 * wait clock stable bit which does not exist.
713 	 */
714 	timeout = ktime_add_ms(ktime_get(), 20);
715 	while (esdhc->vendor_ver > VENDOR_V_22) {
716 		bool timedout = ktime_after(ktime_get(), timeout);
717 
718 		if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
719 			break;
720 		if (timedout) {
721 			pr_err("%s: Internal clock never stabilised.\n",
722 				mmc_hostname(host->mmc));
723 			break;
724 		}
725 		usleep_range(10, 20);
726 	}
727 
728 	/* Additional setting for HS400. */
729 	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
730 	    clock == MMC_HS200_MAX_DTR) {
731 		temp = sdhci_readl(host, ESDHC_TBCTL);
732 		sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
733 		temp = sdhci_readl(host, ESDHC_SDCLKCTL);
734 		sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
735 		esdhc_clock_enable(host, true);
736 
737 		temp = sdhci_readl(host, ESDHC_DLLCFG0);
738 		temp |= ESDHC_DLL_ENABLE;
739 		if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
740 			temp |= ESDHC_DLL_FREQ_SEL;
741 		sdhci_writel(host, temp, ESDHC_DLLCFG0);
742 		temp = sdhci_readl(host, ESDHC_TBCTL);
743 		sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
744 
745 		esdhc_clock_enable(host, false);
746 		esdhc_flush_async_fifo(host);
747 	}
748 	esdhc_clock_enable(host, false);
749 }
750 
751 static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
752 {
753 	u32 ctrl;
754 
755 	ctrl = sdhci_readl(host, ESDHC_PROCTL);
756 	ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
757 	switch (width) {
758 	case MMC_BUS_WIDTH_8:
759 		ctrl |= ESDHC_CTRL_8BITBUS;
760 		break;
761 
762 	case MMC_BUS_WIDTH_4:
763 		ctrl |= ESDHC_CTRL_4BITBUS;
764 		break;
765 
766 	default:
767 		break;
768 	}
769 
770 	sdhci_writel(host, ctrl, ESDHC_PROCTL);
771 }
772 
/*
 * Controller reset with eSDHC-specific fixups: optional pre-reset delay,
 * bus-width/interrupt restoration on old IP after a data reset, and
 * tuning-block/DLL cleanup on newer IP after a full reset.
 */
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val, bus_width = 0;

	/*
	 * Add delay to make sure all the DMA transfers are finished
	 * for quirk.
	 */
	if (esdhc->quirk_delay_before_data_reset &&
	    (mask & SDHCI_RESET_DATA) &&
	    (host->flags & SDHCI_REQ_USE_DMA))
		mdelay(5);

	/*
	 * Save bus-width for eSDHC whose vendor version is 2.2
	 * or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
	}

	sdhci_reset(host, mask);

	/*
	 * Restore bus-width setting and interrupt registers for eSDHC
	 * whose vendor version is 2.2 or lower for data reset.
	 */
	if ((mask & SDHCI_RESET_DATA) &&
	    (esdhc->vendor_ver <= VENDOR_V_22)) {
		val = sdhci_readl(host, ESDHC_PROCTL);
		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		val |= bus_width;
		sdhci_writel(host, val, ESDHC_PROCTL);

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}

	/*
	 * Some bits have to be cleaned manually for eSDHC whose spec
	 * version is higher than 3.0 for all reset.
	 */
	if ((mask & SDHCI_RESET_ALL) &&
	    (esdhc->spec_ver >= SDHCI_SPEC_300)) {
		/* Tuning block must be disabled after a full reset. */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_EN;
		sdhci_writel(host, val, ESDHC_TBCTL);

		/*
		 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
		 * 0 for quirk.
		 */
		if (esdhc->quirk_unreliable_pulse_detection) {
			val = sdhci_readl(host, ESDHC_DLLCFG1);
			val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
			sdhci_writel(host, val, ESDHC_DLLCFG1);
		}
	}
}
836 
837 /* The SCFG, Supplemental Configuration Unit, provides SoC specific
838  * configuration and status registers for the device. There is a
839  * SDHC IO VSEL control register on SCFG for some platforms. It's
840  * used to support SDHC IO voltage switching.
841  */
/* SoCs whose SCFG block provides the SDHC IO voltage-select register. */
static const struct of_device_id scfg_device_ids[] = {
	{ .compatible = "fsl,t1040-scfg", },
	{ .compatible = "fsl,ls1012a-scfg", },
	{ .compatible = "fsl,ls1046a-scfg", },
	{}
};
848 
849 /* SDHC IO VSEL control register definition */
850 #define SCFG_SDHCIOVSELCR	0x408
851 #define SDHCIOVSELCR_TGLEN	0x80000000
852 #define SDHCIOVSELCR_VSELVAL	0x60000000
853 #define SDHCIOVSELCR_SDHC_VS	0x00000001
854 
855 static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
856 				       struct mmc_ios *ios)
857 {
858 	struct sdhci_host *host = mmc_priv(mmc);
859 	struct device_node *scfg_node;
860 	void __iomem *scfg_base = NULL;
861 	u32 sdhciovselcr;
862 	u32 val;
863 
864 	/*
865 	 * Signal Voltage Switching is only applicable for Host Controllers
866 	 * v3.00 and above.
867 	 */
868 	if (host->version < SDHCI_SPEC_300)
869 		return 0;
870 
871 	val = sdhci_readl(host, ESDHC_PROCTL);
872 
873 	switch (ios->signal_voltage) {
874 	case MMC_SIGNAL_VOLTAGE_330:
875 		val &= ~ESDHC_VOLT_SEL;
876 		sdhci_writel(host, val, ESDHC_PROCTL);
877 		return 0;
878 	case MMC_SIGNAL_VOLTAGE_180:
879 		scfg_node = of_find_matching_node(NULL, scfg_device_ids);
880 		if (scfg_node)
881 			scfg_base = of_iomap(scfg_node, 0);
882 		if (scfg_base) {
883 			sdhciovselcr = SDHCIOVSELCR_TGLEN |
884 				       SDHCIOVSELCR_VSELVAL;
885 			iowrite32be(sdhciovselcr,
886 				scfg_base + SCFG_SDHCIOVSELCR);
887 
888 			val |= ESDHC_VOLT_SEL;
889 			sdhci_writel(host, val, ESDHC_PROCTL);
890 			mdelay(5);
891 
892 			sdhciovselcr = SDHCIOVSELCR_TGLEN |
893 				       SDHCIOVSELCR_SDHC_VS;
894 			iowrite32be(sdhciovselcr,
895 				scfg_base + SCFG_SDHCIOVSELCR);
896 			iounmap(scfg_base);
897 		} else {
898 			val |= ESDHC_VOLT_SEL;
899 			sdhci_writel(host, val, ESDHC_PROCTL);
900 		}
901 		return 0;
902 	default:
903 		return 0;
904 	}
905 }
906 
/* SoCs affected by the type-1 tuning erratum (fixed tuning window). */
static struct soc_device_attribute soc_tuning_erratum_type1[] = {
	{ .family = "QorIQ T1023", },
	{ .family = "QorIQ T1040", },
	{ .family = "QorIQ T2080", },
	{ .family = "QorIQ LS1021A", },
	{ },
};
914 
/* SoCs affected by the type-2 tuning erratum (window must be verified). */
static struct soc_device_attribute soc_tuning_erratum_type2[] = {
	{ .family = "QorIQ LS1012A", },
	{ .family = "QorIQ LS1043A", },
	{ .family = "QorIQ LS1046A", },
	{ .family = "QorIQ LS1080A", },
	{ .family = "QorIQ LS2080A", },
	{ .family = "QorIQ LA1575A", },
	{ },
};
924 
925 static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
926 {
927 	u32 val;
928 
929 	esdhc_clock_enable(host, false);
930 	esdhc_flush_async_fifo(host);
931 
932 	val = sdhci_readl(host, ESDHC_TBCTL);
933 	if (enable)
934 		val |= ESDHC_TB_EN;
935 	else
936 		val &= ~ESDHC_TB_EN;
937 	sdhci_writel(host, val, ESDHC_TBCTL);
938 
939 	esdhc_clock_enable(host, true);
940 }
941 
/*
 * Read back the hardware tuning window pointers from TBSTAT, using the
 * documented TBCTL write/rewrite/double-read sequence. On return,
 * *window_start holds TBSTAT[15:8] and *window_end holds TBSTAT[7:0].
 */
static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	u32 val;

	/* Write TBCTL[11:8]=4'h8 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~(0xf << 8);
	val |= 8 << 8;
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read TBCTL[31:0] register and rewrite again */
	val = sdhci_readl(host, ESDHC_TBCTL);
	sdhci_writel(host, val, ESDHC_TBCTL);

	mdelay(1);

	/* Read the TBSTAT[31:0] register twice */
	val = sdhci_readl(host, ESDHC_TBSTAT);
	val = sdhci_readl(host, ESDHC_TBSTAT);

	*window_end = val & 0xff;
	*window_start = (val >> 8) & 0xff;
}
968 
/*
 * Pick the software-tuning window. Type-1 erratum parts always use the
 * fixed 5x/3x div_ratio window; otherwise the window read from hardware
 * is validated and widened to 8x/4x if it looks implausible.
 */
static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
				    u8 *window_end)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 start_ptr, end_ptr;

	if (esdhc->quirk_tuning_erratum_type1) {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
		return;
	}

	esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

	/* Reset data lines by setting ESDHCCTL[RSTD] */
	sdhci_reset(host, SDHCI_RESET_DATA);
	/* Write 32'hFFFF_FFFF to IRQSTAT register */
	sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

	/* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
	 * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
	 * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
	 * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
	 */

	if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
		*window_start = 8 * esdhc->div_ratio;
		*window_end = 4 * esdhc->div_ratio;
	} else {
		*window_start = 5 * esdhc->div_ratio;
		*window_end = 3 * esdhc->div_ratio;
	}
}
1003 
/*
 * Run one software tuning pass: program the window pointers into TBPTR,
 * select software tuning mode in TBCTL, then invoke the generic SDHCI
 * tuning with in_sw_tuning set (which makes the writew path perform the
 * EXTN/SMPCLKSEL sequencing). Returns the sdhci_execute_tuning() result.
 */
static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
				   u8 window_start, u8 window_end)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 val;
	int ret;

	/* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
	val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
	      ESDHC_WNDW_STRT_PTR_MASK;
	val |= window_end & ESDHC_WNDW_END_PTR_MASK;
	sdhci_writel(host, val, ESDHC_TBPTR);

	/* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	val &= ~ESDHC_TB_MODE_MASK;
	val |= ESDHC_TB_MODE_SW;
	sdhci_writel(host, val, ESDHC_TBCTL);

	esdhc->in_sw_tuning = true;
	ret = sdhci_execute_tuning(mmc, opcode);
	esdhc->in_sw_tuning = false;
	return ret;
}
1030 
/*
 * eSDHC tuning entry point (replaces mmc_host_ops.execute_tuning).
 *
 * Strategy:
 *  1. Cap the clock so the SD clock divisor is > 3 (hardware requirement
 *     for tuning, per the reference manual).
 *  2. Try HW tuning (TBCTL[TB_MODE]=3).
 *  3. On affected silicon (tuning erratum type1/type2), a "successful"
 *     HW tuning may still be wrong; detect this via the tuning window
 *     width and fall back to SW tuning over a computed window.
 *  4. If SW tuning also trips the erratum, retry the whole sequence once
 *     (retries == 1) at a reduced clock.
 *
 * On final failure the tuning block is disabled; on success for HS400 the
 * flow-control bit in SDTIMNGCTL is set as required by that mode.
 *
 * NOTE(review): sdhci_execute_tuning() clears SDHCI_HS400_TUNING from
 * host->flags, hence the "Recover HS400 tuning flag" steps before each
 * retry — presumably so the next iteration still sees HS400 context.
 */
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u8 window_start, window_end;
	int ret, retries = 1;
	bool hs400_tuning;
	unsigned int clk;
	u32 val;

	/* For tuning mode, the sd clock divisor value
	 * must be larger than 3 according to reference manual.
	 */
	clk = esdhc->peripheral_clock / 3;
	if (host->clock > clk)
		esdhc_of_set_clock(host, clk);

	esdhc_tuning_block_enable(host, true);

	/* Remember HS400 context before sdhci_execute_tuning() clears it. */
	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	do {
		/* Re-apply the clock to honor limited-divisor quirk in HS400. */
		if (esdhc->quirk_limited_clk_division &&
		    hs400_tuning)
			esdhc_of_set_clock(host, host->clock);

		/* Do HW tuning */
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_TB_MODE_MASK;
		val |= ESDHC_TB_MODE_3;
		sdhci_writel(host, val, ESDHC_TBCTL);

		ret = sdhci_execute_tuning(mmc, opcode);
		if (ret)
			break;

		/* For type2 affected platforms of the tuning erratum,
		 * tuning may succeed although eSDHC might not have
		 * tuned properly. Need to check tuning window.
		 */
		if (esdhc->quirk_tuning_erratum_type2 &&
		    !host->tuning_err) {
			esdhc_tuning_window_ptr(host, &window_start,
						&window_end);
			/* Window wider than 4*div_ratio+2 => bad tuning. */
			if (abs(window_start - window_end) >
			    (4 * esdhc->div_ratio + 2))
				host->tuning_err = -EAGAIN;
		}

		/* If HW tuning fails and triggers erratum,
		 * try workaround.
		 */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		/* Tuning failed for good: stop using the tuning block. */
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		/* HS400 needs flow control background enabled after tuning. */
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}
1131 
1132 static void esdhc_set_uhs_signaling(struct sdhci_host *host,
1133 				   unsigned int timing)
1134 {
1135 	if (timing == MMC_TIMING_MMC_HS400)
1136 		esdhc_tuning_block_enable(host, true);
1137 	else
1138 		sdhci_set_uhs_signaling(host, timing);
1139 }
1140 
1141 static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
1142 {
1143 	u32 command;
1144 
1145 	if (of_find_compatible_node(NULL, NULL,
1146 				"fsl,p2020-esdhc")) {
1147 		command = SDHCI_GET_CMD(sdhci_readw(host,
1148 					SDHCI_COMMAND));
1149 		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
1150 				sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
1151 				intmask & SDHCI_INT_DATA_END) {
1152 			intmask &= ~SDHCI_INT_DATA_END;
1153 			sdhci_writel(host, SDHCI_INT_DATA_END,
1154 					SDHCI_INT_STATUS);
1155 		}
1156 	}
1157 	return intmask;
1158 }
1159 
1160 #ifdef CONFIG_PM_SLEEP
/* Saved copy of the host-control (protocol control) register across
 * suspend; restored in esdhc_of_resume(). Single global, so this
 * assumes at most one eSDHC instance suspends at a time —
 * NOTE(review): with multiple controllers the last one suspended wins.
 */
static u32 esdhc_proctl;
/*
 * System-suspend hook: save the protocol control register (lost over
 * suspend on this IP), request retuning on resume unless the host uses
 * tuning mode 3 (auto-retuning), then run the generic SDHCI suspend.
 */
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	/* Must be captured before sdhci_suspend_host() powers things down. */
	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}
1173 
1174 static int esdhc_of_resume(struct device *dev)
1175 {
1176 	struct sdhci_host *host = dev_get_drvdata(dev);
1177 	int ret = sdhci_resume_host(host);
1178 
1179 	if (ret == 0) {
1180 		/* Isn't this already done by sdhci_resume_host() ? --rmk */
1181 		esdhc_of_enable_dma(host);
1182 		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
1183 	}
1184 	return ret;
1185 }
1186 #endif
1187 
/* System sleep PM ops; no-ops when CONFIG_PM_SLEEP is disabled. */
static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			esdhc_of_suspend,
			esdhc_of_resume);
1191 
/* Host ops for big-endian eSDHC register blocks (PowerPC parts). */
static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
1209 
/* Host ops for little-endian eSDHC register blocks (Layerscape parts);
 * identical to the BE variant except for the register accessors.
 */
static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};
1227 
/* Platform data for big-endian controllers; PPC parts additionally get
 * the broken-card-detection quirk.
 */
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};
1237 
/* Platform data for little-endian controllers. */
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_pdata_ops_placeholder,
};
1244 
/* SoCs whose HOSTVER register reports a wrong version. */
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};
1250 
/* SoCs that only support a limited set of SD clock divisors. */
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};
1257 
/* SoCs with unreliable card-detect pulse detection. */
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ },
};
1262 
/*
 * One-time controller setup called from probe: read the vendor/spec
 * version, latch the SoC-revision quirks, attach the per-compatible
 * clock fixup table, and if a peripheral clock is described in the DT,
 * measure it and select it as the eSDHC base clock.
 */
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	/* Split HOSTVER into vendor and spec version fields. */
	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			     SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	/* Per-compatible max-clock fixup table (may be absent). */
	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
		esdhc->quirk_delay_before_data_reset = true;

	/* Optional peripheral clock; absence is not an error. */
	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock would be assigned with a value
		 * which is eSDHC base clock when use periperal clock.
		 * For some platforms, the clock value got by common clk
		 * API is peripheral clock while the eSDHC base clock is
		 * 1/2 peripheral clock.
		 */
		if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
		    of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
			esdhc->peripheral_clock = clk_get_rate(clk) / 2;
		else
			esdhc->peripheral_clock = clk_get_rate(clk);

		clk_put(clk);
	}

	if (esdhc->peripheral_clock) {
		/* Gate the SD clock while flipping the clock mux selector. */
		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
		val |= ESDHC_PERIPHERAL_CLK_SEL;
		sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
		esdhc_clock_enable(host, true);
	}
}
1330 
1331 static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
1332 {
1333 	esdhc_tuning_block_enable(mmc_priv(mmc), false);
1334 	return 0;
1335 }
1336 
1337 static int sdhci_esdhc_probe(struct platform_device *pdev)
1338 {
1339 	struct sdhci_host *host;
1340 	struct device_node *np;
1341 	struct sdhci_pltfm_host *pltfm_host;
1342 	struct sdhci_esdhc *esdhc;
1343 	int ret;
1344 
1345 	np = pdev->dev.of_node;
1346 
1347 	if (of_property_read_bool(np, "little-endian"))
1348 		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
1349 					sizeof(struct sdhci_esdhc));
1350 	else
1351 		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
1352 					sizeof(struct sdhci_esdhc));
1353 
1354 	if (IS_ERR(host))
1355 		return PTR_ERR(host);
1356 
1357 	host->mmc_host_ops.start_signal_voltage_switch =
1358 		esdhc_signal_voltage_switch;
1359 	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
1360 	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
1361 	host->tuning_delay = 1;
1362 
1363 	esdhc_init(pdev, host);
1364 
1365 	sdhci_get_of_property(pdev);
1366 
1367 	pltfm_host = sdhci_priv(host);
1368 	esdhc = sdhci_pltfm_priv(pltfm_host);
1369 	if (soc_device_match(soc_tuning_erratum_type1))
1370 		esdhc->quirk_tuning_erratum_type1 = true;
1371 	else
1372 		esdhc->quirk_tuning_erratum_type1 = false;
1373 
1374 	if (soc_device_match(soc_tuning_erratum_type2))
1375 		esdhc->quirk_tuning_erratum_type2 = true;
1376 	else
1377 		esdhc->quirk_tuning_erratum_type2 = false;
1378 
1379 	if (esdhc->vendor_ver == VENDOR_V_22)
1380 		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
1381 
1382 	if (esdhc->vendor_ver > VENDOR_V_22)
1383 		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1384 
1385 	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
1386 		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
1387 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1388 	}
1389 
1390 	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
1391 	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
1392 	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
1393 	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
1394 	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
1395 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1396 
1397 	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
1398 		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
1399 
1400 	esdhc->quirk_ignore_data_inhibit = false;
1401 	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
1402 		/*
1403 		 * Freescale messed up with P2020 as it has a non-standard
1404 		 * host control register
1405 		 */
1406 		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
1407 		esdhc->quirk_ignore_data_inhibit = true;
1408 	}
1409 
1410 	/* call to generic mmc_of_parse to support additional capabilities */
1411 	ret = mmc_of_parse(host->mmc);
1412 	if (ret)
1413 		goto err;
1414 
1415 	mmc_of_parse_voltage(np, &host->ocr_mask);
1416 
1417 	ret = sdhci_add_host(host);
1418 	if (ret)
1419 		goto err;
1420 
1421 	return 0;
1422  err:
1423 	sdhci_pltfm_free(pdev);
1424 	return ret;
1425 }
1426 
/* Platform driver glue; matched against the compatibles in
 * sdhci_esdhc_of_match, with system-sleep PM ops.
 */
static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");
1443