/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"

#define DRIVER_NAME "mmci-pl18x"

#ifdef CONFIG_DMA_ENGINE
void mmci_variant_init(struct mmci_host *host);
#else
static inline void mmci_variant_init(struct mmci_host *host) {}
#endif

#ifdef CONFIG_MMC_STM32_SDMMC
void sdmmc_variant_init(struct mmci_host *host);
#else
static inline void sdmmc_variant_init(struct mmci_host *host) {}
#endif

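/*
 * Default maximum operating frequency in Hz; probe falls back to this
 * (module-parameter) value when no explicit f_max is supplied.
 */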
static unsigned int fmax = 515633;

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.reversed_irq_handling	= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_stm32 = {
	.fifosize		= 32 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 48000000,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.init			= mmci_variant_init,
};

static struct variant_data variant_stm32_sdmmc = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.f_max			= 208000000,
	.stm32_clkdiv		= true,
	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first		= true,
	.datacnt_useless	= true,
	.datalength_bits	= 25,
	.datactrl_blocksz	= 14,
	.stm32_idmabsize_mask	= GENMASK(12, 5),
	.init			= sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
	.blksz_datactrl4	= true,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 208000000,
	.explicit_mclk_control	= true,
	.qcom_fifo		= true,
	.qcom_dml		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= qcom_variant_init,
};

/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
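			/*
			 * E.g. mclk = 100 MHz, desired f = 400 kHz:
			 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
			 * cclk = 100000000 / (248 + 2) = 400 kHz exactly.
			 */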
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
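			/*
			 * E.g. mclk = 100 MHz, desired f = 400 kHz:
			 * clkdiv = 100000000 / 800000 - 1 = 124,
			 * cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
			 */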
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}

void mmci_dma_release(struct mmci_host *host)
{
	if (host->ops && host->ops->dma_release)
		host->ops->dma_release(host);

	host->use_dma = false;
}

void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;

	/* initialize pre request cookie */
	host->next_cookie = 1;

	host->use_dma = true;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	if (host->ops && host->ops->validate_data)
		return host->ops->validate_data(host, data);

	return 0;
}

int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);

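	/*
	 * Hand out a non-zero cookie; if the signed counter ever wraps
	 * negative we restart at 1, so 0 keeps meaning "no prepared job"
	 * (mmci_unprep_data() resets the cookie to 0).
	 */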
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}

void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
		      int err)
{
	if (host->ops && host->ops->unprep_data)
		host->ops->unprep_data(host, data, err);

	data->host_cookie = 0;
}

void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

	if (host->ops && host->ops->get_next_data)
		host->ops->get_next_data(host, data);
}

int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	struct mmc_data *data = host->data;
	int ret;

	if (!host->use_dma)
		return -EINVAL;

	ret = mmci_prep_data(host, data, false);
	if (ret)
		return ret;

	if (!host->ops || !host->ops->dma_start)
		return -EINVAL;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);

	host->ops->dma_start(host, &datactrl);

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_finalize)
		host->ops->dma_finalize(host, data);
}

void mmci_dma_error(struct mmci_host *host)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_error)
		host->ops->dma_error(host);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;
	struct dma_chan	*chan;
};

struct mmci_dmae_priv {
	struct dma_chan	*cur;
	struct dma_chan	*rx_channel;
	struct dma_chan	*tx_channel;
	struct dma_async_tx_descriptor	*desc_current;
	struct mmci_dmae_next next_data;
};

int mmci_dmae_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;

	dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "rx");
	dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "tx");

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (dmae->rx_channel && !dmae->tx_channel)
		dmae->tx_channel = dmae->rx_channel;

	if (dmae->rx_channel)
		rxname = dma_chan_name(dmae->rx_channel);
	else
		rxname = "none";

	if (dmae->tx_channel)
		txname = dma_chan_name(dmae->tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (dmae->tx_channel) {
		struct device *dev = dmae->tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (dmae->rx_channel) {
		struct device *dev = dmae->rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (!dmae->tx_channel || !dmae->rx_channel) {
		mmci_dmae_release(host);
		return -EINVAL;
	}

	return 0;
}

/*
 * Release the DMA channels. This is used both in the error path of
 * mmci_dmae_setup() and whenever the driver gives up on DMA and falls
 * back to PIO.
 */
void mmci_dmae_release(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (dmae->rx_channel)
		dma_release_channel(dmae->rx_channel);
	if (dmae->tx_channel)
		dma_release_channel(dmae->tx_channel);
	dmae->rx_channel = dmae->tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = dmae->rx_channel;
	else
		chan = dmae->tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

void mmci_dmae_error(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(dmae->cur);
	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
	host->data->host_cookie = 0;

	mmci_dma_unmap(host, host->data);
}

void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	u32 status;
	int i;

	if (!dma_inprogress(host))
		return;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_error(host);
		if (!data->error)
			data->error = -EIO;
	} else if (!data->host_cookie) {
		mmci_dma_unmap(host, data);
	}

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
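	/* e.g. the ARM variant's 32-byte fifohalfsize yields 8-word bursts */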
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = dmae->rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = dmae->tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

int mmci_dmae_prep_data(struct mmci_host *host,
			struct mmc_data *data,
			bool next)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *nd = &dmae->next_data;

	if (!host->use_dma)
		return -EINVAL;

	if (next)
		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
	/* Check if next job is already prepared. */
	if (dmae->cur && dmae->desc_current)
		return 0;
	/* No job was prepared, thus do it now. */
	return _mmci_dmae_prep_data(host, data, &dmae->cur,
				    &dmae->desc_current);
}

int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmc_data *data = host->data;

	host->dma_in_progress = true;
	dmaengine_submit(dmae->desc_current);
	dma_async_issue_pending(dmae->cur);

	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	*datactrl |= MCI_DPSM_DMAENABLE;

	return 0;
}

void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *next = &dmae->next_data;

	if (!host->use_dma)
		return;

	WARN_ON(!data->host_cookie && (next->desc || next->chan));

	dmae->desc_current = next->desc;
	dmae->cur = next->chan;
	next->desc = NULL;
	next->chan = NULL;
}

void mmci_dmae_unprep_data(struct mmci_host *host,
			   struct mmc_data *data, int err)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!host->use_dma)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_dmae_next *next = &dmae->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = dmae->rx_channel;
		else
			chan = dmae->tx_channel;
		dmaengine_terminate_all(chan);

		if (dmae->desc_current == next->desc)
			dmae->desc_current = NULL;

		if (dmae->cur == next->chan) {
			host->dma_in_progress = false;
			dmae->cur = NULL;
		}

		next->desc = NULL;
		next->chan = NULL;
	}
}

static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};

void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}
#endif

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	WARN_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	mmci_prep_data(host, data, true);
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_unprep_data(host, data, err);
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

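	/* e.g. timeout_ns = 100 ms at cclk = 25 MHz => clks = 2500000 */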
	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

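	/*
	 * E.g. blksz = 512: the power-of-two exponent 9 goes into bits
	 * [7:4] on ARM-style variants, while blksz_datactrl16/4 variants
	 * instead take the raw block size at bit 16 or bit 4.
	 */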
	if (variant->blksz_datactrl16)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
	else
		datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		mmci_dma_error(host);

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		if (!host->variant->datacnt_useless) {
			remain = readl(host->base + MMCIDATACNT);
			success = data->blksz * data->blocks - remain;
		} else {
			success = 0;
		}

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		mmci_dma_finalize(host, data);

		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || (host->mrq->sbc && !data->error))
			mmci_request_end(host, data->mrq);
		else
			mmci_start_command(host, data->stop, 0);
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);

	/*
	 * The status must flag at least one of these interrupts for the
	 * command to be considered worth handling. Note that we tag on
	 * any latent IRQs postponed due to waiting for busy status.
	 */
	if (!((status|host->busy_status) &
	      (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/*
	 * ST Micro variant: handle busy detection.
	 */
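	/*
	 * In short: when a busy response starts, cache the command status
	 * and unmask the busy-end IRQ; the command is only completed once
	 * DAT0 is released and the busy-end IRQ has fired.
	 */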
	if (host->variant->busy_detect) {
		bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

		/* We are busy with a command, return */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag))
			return;

		/*
		 * We were not busy, but we now got a busy response on
		 * something that was not an error, and we double-check
		 * that the special busy status bit is still set before
		 * proceeding.
		 */
		if (!host->busy_status && busy_resp &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			/* Clear the busy start IRQ */
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			/* Unmask the busy end IRQ */
			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			/*
			 * Now cache the last response status code (until
			 * the busy bit goes low), and return.
			 */
			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}

		/*
		 * At this point we are not busy with a command, we have
		 * not received a new busy request, clear and mask the busy
		 * end IRQ and fall through to process the IRQ.
		 */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			mmci_dma_error(host);

			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!host->variant->datactrl_first &&
		   !(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

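/*
 * MMCIFIFOCNT holds the number of words still to be transferred, so
 * 'remain' minus that count (converted to bytes) is what can be read
 * from the FIFO right now.
 */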
static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On Qcom SDCC4, only 8 words are used in each burst, so only 8
	 * addresses from the FIFO range should be used.
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
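		/*
		 * E.g. count = 7: one full word (bytes 0-3) is read below
		 * and count is rounded down to 4; the trailing 3 bytes are
		 * picked up via the small bounce buffer on a later pass,
		 * when count < 4.
		 */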
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 to the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
		 * enabled) in mmci_cmd_irq() function where ST Micro busy
		 * detection variant is handled. Considering the HW seems to be
		 * triggering the IRQ on both edges while monitoring DAT0 for
		 * busy completion and that same status bit is used to monitor
		 * start and end of busy detection, special care must be taken
		 * to make sure that both start and end interrupts are always
		 * cleared one after the other.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Don't poll for busy completion in irq context.
		 */
		if (host->variant->busy_detect && host->busy_status)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data &&
	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
		host->plat->ios_handler(mmc_dev(mmc), ios))
			dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads on its own, then we
		 * expect the pinctrl to be able to do that for us.
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	if (host->ops && host->ops->set_clkreg)
		host->ops->set_clkreg(host, ios->clock);
	else
		mmci_set_clkreg(host, ios->clock);

	if (host->ops && host->ops->set_pwrreg)
		host->ops->set_pwrreg(host, pwr);
	else
		mmci_write_pwrreg(host, pwr);

	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}

static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;
	if (of_get_property(np, "st,sig-dir", NULL))
		host->pwr_reg_add |= MCI_STM32_DIRPOL;
	if (of_get_property(np, "st,neg-edge", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
	if (of_get_property(np, "st,use-ckin", NULL))
		host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}

static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/*
	 * Some variants (STM32) don't have an opendrain bit; nevertheless
	 * the pins can be set accordingly using pinctrl
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  PINCTRL_STATE_DEFAULT);
		if (IS_ERR(host->pins_default)) {
			dev_err(mmc_dev(mmc), "Can't select default pins\n");
			ret = PTR_ERR(host->pins_default);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz, so we try to adjust
	 * the clock down to this (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	if (variant->init)
		variant->init(host);

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
1891 	 * on Qualcomm like controllers get the nearest minimum clock to 100Khz
1892 	 */
1893 	if (variant->st_clkdiv)
1894 		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
1895 	else if (variant->stm32_clkdiv)
1896 		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
1897 	else if (variant->explicit_mclk_control)
1898 		mmc->f_min = clk_round_rate(host->clk, 100000);
1899 	else
1900 		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
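	/*
	 * Worked example, using the maximum dividers implied above: with
	 * mclk = 100 MHz, f_min is roughly 389 kHz for the ST divider
	 * (divide-by-257), 48.9 kHz for the STM32 divider (divide-by-2046)
	 * and 195 kHz for the ARM divider (divide-by-512).
	 */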
1901 	/*
1902 	 * If no maximum operating frequency is supplied, fall back to using
1903 	 * the module parameter, which has a (low) default value in case it
1904 	 * is not specified. In either case the value must not exceed the
1905 	 * clock rate into the block, of course.
1906 	 */
1907 	if (mmc->f_max)
1908 		mmc->f_max = variant->explicit_mclk_control ?
1909 				min(variant->f_max, mmc->f_max) :
1910 				min(host->mclk, mmc->f_max);
1911 	else
1912 		mmc->f_max = variant->explicit_mclk_control ?
1913 				fmax : min(host->mclk, fmax);
1914 
1916 	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
1917 
1918 	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
1919 	if (IS_ERR(host->rst)) {
1920 		ret = PTR_ERR(host->rst);
1921 		goto clk_disable;
1922 	}
1923 
1924 	/* Get regulators and the supported OCR mask */
1925 	ret = mmc_regulator_get_supply(mmc);
1926 	if (ret)
1927 		goto clk_disable;
1928 
1929 	if (!mmc->ocr_avail)
1930 		mmc->ocr_avail = plat->ocr_mask;
1931 	else if (plat->ocr_mask)
1932 		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
1933 
1934 	/* We support these capabilities. */
1935 	mmc->caps |= MMC_CAP_CMD23;
1936 
1937 	/*
1938 	 * Enable busy detection.
1939 	 */
1940 	if (variant->busy_detect) {
1941 		mmci_ops.card_busy = mmci_card_busy;
1942 		/*
1943 		 * Not all variants have a flag to enable busy detection
1944 		 * in the DPSM, but if they do, set it here.
1945 		 */
1946 		if (variant->busy_dpsm_flag)
1947 			mmci_write_datactrlreg(host,
1948 					       host->variant->busy_dpsm_flag);
1949 		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
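		/* Zero here signals that the host imposes no maximum busy timeout. */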
1950 		mmc->max_busy_timeout = 0;
1951 	}
1952 
1953 	mmc->ops = &mmci_ops;
1954 
1955 	/* We support these PM capabilities. */
1956 	mmc->pm_caps |= MMC_PM_KEEP_POWER;
1957 
1958 	/*
1959 	 * We can do scatter/gather I/O (SGIO).
1960 	 */
1961 	mmc->max_segs = NR_SG;
1962 
1963 	/*
1964 	 * Since only a certain number of bits are valid in the data length
1965 	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
1966 	 * single request.
1967 	 */
1968 	mmc->max_req_size = (1 << variant->datalength_bits) - 1;
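	/* With 16 data-length bits, for instance, that caps a request at 65535 bytes. */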
1969 
1970 	/*
1971 	 * Set the maximum segment size.  Since we aren't doing DMA
1972 	 * (yet) we are only limited by the data length register.
1973 	 */
1974 	mmc->max_seg_size = mmc->max_req_size;
1975 
1976 	/*
1977 	 * Block size can be up to 2048 bytes, but must be a power of two.
1978 	 */
1979 	mmc->max_blk_size = 1 << variant->datactrl_blocksz;
1980 
1981 	/*
1982 	 * Limit the number of blocks transferred so that we don't overflow
1983 	 * the maximum request size.
1984 	 */
1985 	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
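	/*
	 * For instance, an 11-bit block size field gives a 2048 byte
	 * max_blk_size and a max_blk_count of 65535 >> 11 = 31.
	 */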
1986 
1987 	spin_lock_init(&host->lock);
1988 
1989 	writel(0, host->base + MMCIMASK0);
1990 
1991 	if (variant->mmcimask1)
1992 		writel(0, host->base + MMCIMASK1);
1993 
1994 	writel(0xfff, host->base + MMCICLEAR);
1995 
1996 	/*
1997 	 * If:
1998 	 * - not using DT but using a descriptor table, or
1999 	 * - using a table of descriptors ALONGSIDE DT,
2000 	 * look up the descriptors named "cd" and "wp" right here, failing
2001 	 * silently if these do not exist.
2002 	 */
2003 	if (!np) {
2004 		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
2005 		if (ret == -EPROBE_DEFER)
2006 			goto clk_disable;
2007 
2008 		ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
2009 		if (ret == -EPROBE_DEFER)
2010 			goto clk_disable;
2011 	}
2012 
2013 	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
2014 			DRIVER_NAME " (cmd)", host);
2015 	if (ret)
2016 		goto clk_disable;
2017 
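	/*
	 * With a single IRQ line, the PIO events raised via MMCIMASK1 are
	 * serviced from the main interrupt handler instead.
	 */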
2018 	if (!dev->irq[1]) {
2019 		host->singleirq = true;
2020 	} else {
2021 		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
2022 				IRQF_SHARED, DRIVER_NAME " (pio)", host);
2023 		if (ret)
2024 			goto clk_disable;
2025 	}
2026 
2027 	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
2028 
2029 	amba_set_drvdata(dev, mmc);
2030 
2031 	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
2032 		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
2033 		 amba_rev(dev), (unsigned long long)dev->res.start,
2034 		 dev->irq[0], dev->irq[1]);
2035 
2036 	mmci_dma_setup(host);
2037 
2038 	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
2039 	pm_runtime_use_autosuspend(&dev->dev);
2040 
2041 	ret = mmc_add_host(mmc);
	if (ret)
		goto clk_disable;
2042 
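	/*
	 * The AMBA bus core takes a runtime PM reference before calling
	 * probe; dropping it here allows the now-registered host to
	 * autosuspend after the 50 ms delay set above.
	 */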
2043 	pm_runtime_put(&dev->dev);
2044 	return 0;
2045 
2046  clk_disable:
2047 	clk_disable_unprepare(host->clk);
2048  host_free:
2049 	mmc_free_host(mmc);
2050 	return ret;
2051 }
2052 
2053 static int mmci_remove(struct amba_device *dev)
2054 {
2055 	struct mmc_host *mmc = amba_get_drvdata(dev);
2056 
2057 	if (mmc) {
2058 		struct mmci_host *host = mmc_priv(mmc);
2059 		struct variant_data *variant = host->variant;
2060 
2061 		/*
2062 		 * Undo the pm_runtime_put() done in probe.  We use the _sync
2063 		 * version here so that we can access the PrimeCell.
2064 		 */
2065 		pm_runtime_get_sync(&dev->dev);
2066 
2067 		mmc_remove_host(mmc);
2068 
2069 		writel(0, host->base + MMCIMASK0);
2070 
2071 		if (variant->mmcimask1)
2072 			writel(0, host->base + MMCIMASK1);
2073 
2074 		writel(0, host->base + MMCICOMMAND);
2075 		writel(0, host->base + MMCIDATACTRL);
2076 
2077 		mmci_dma_release(host);
2078 		clk_disable_unprepare(host->clk);
2079 		mmc_free_host(mmc);
2080 	}
2081 
2082 	return 0;
2083 }
2084 
2085 #ifdef CONFIG_PM
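/*
 * Variants flagged pwrreg_nopower can be powered down completely across a
 * runtime suspend: mmci_save() zeroes the live registers and mmci_restore()
 * rewrites the cached clock, data control and power values afterwards.
 */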
2086 static void mmci_save(struct mmci_host *host)
2087 {
2088 	unsigned long flags;
2089 
2090 	spin_lock_irqsave(&host->lock, flags);
2091 
2092 	writel(0, host->base + MMCIMASK0);
2093 	if (host->variant->pwrreg_nopower) {
2094 		writel(0, host->base + MMCIDATACTRL);
2095 		writel(0, host->base + MMCIPOWER);
2096 		writel(0, host->base + MMCICLOCK);
2097 	}
2098 	mmci_reg_delay(host);
2099 
2100 	spin_unlock_irqrestore(&host->lock, flags);
2101 }
2102 
2103 static void mmci_restore(struct mmci_host *host)
2104 {
2105 	unsigned long flags;
2106 
2107 	spin_lock_irqsave(&host->lock, flags);
2108 
2109 	if (host->variant->pwrreg_nopower) {
2110 		writel(host->clk_reg, host->base + MMCICLOCK);
2111 		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
2112 		writel(host->pwr_reg, host->base + MMCIPOWER);
2113 	}
2114 	writel(MCI_IRQENABLE | host->variant->start_err,
2115 	       host->base + MMCIMASK0);
2116 	mmci_reg_delay(host);
2117 
2118 	spin_unlock_irqrestore(&host->lock, flags);
2119 }
2120 
2121 static int mmci_runtime_suspend(struct device *dev)
2122 {
2123 	struct amba_device *adev = to_amba_device(dev);
2124 	struct mmc_host *mmc = amba_get_drvdata(adev);
2125 
2126 	if (mmc) {
2127 		struct mmci_host *host = mmc_priv(mmc);
2128 		pinctrl_pm_select_sleep_state(dev);
2129 		mmci_save(host);
2130 		clk_disable_unprepare(host->clk);
2131 	}
2132 
2133 	return 0;
2134 }
2135 
2136 static int mmci_runtime_resume(struct device *dev)
2137 {
2138 	struct amba_device *adev = to_amba_device(dev);
2139 	struct mmc_host *mmc = amba_get_drvdata(adev);
2140 
2141 	if (mmc) {
2142 		struct mmci_host *host = mmc_priv(mmc);
2143 		clk_prepare_enable(host->clk);
2144 		mmci_restore(host);
2145 		pinctrl_pm_select_default_state(dev);
2146 	}
2147 
2148 	return 0;
2149 }
2150 #endif
2151 
2152 static const struct dev_pm_ops mmci_dev_pm_ops = {
2153 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2154 				pm_runtime_force_resume)
2155 	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
2156 };
2157 
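/*
 * The PrimeCell peripheral ID encodes the part number in bits [11:0], the
 * designer in [19:12], the revision in [23:20] and the configuration in
 * [31:24]; a mask of 0xff0fffff therefore matches any revision of a part.
 */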
2158 static const struct amba_id mmci_ids[] = {
2159 	{
2160 		.id	= 0x00041180,
2161 		.mask	= 0xff0fffff,
2162 		.data	= &variant_arm,
2163 	},
2164 	{
2165 		.id	= 0x01041180,
2166 		.mask	= 0xff0fffff,
2167 		.data	= &variant_arm_extended_fifo,
2168 	},
2169 	{
2170 		.id	= 0x02041180,
2171 		.mask	= 0xff0fffff,
2172 		.data	= &variant_arm_extended_fifo_hwfc,
2173 	},
2174 	{
2175 		.id	= 0x00041181,
2176 		.mask	= 0x000fffff,
2177 		.data	= &variant_arm,
2178 	},
2179 	/* ST Micro variants */
2180 	{
2181 		.id     = 0x00180180,
2182 		.mask   = 0x00ffffff,
2183 		.data	= &variant_u300,
2184 	},
2185 	{
2186 		.id     = 0x10180180,
2187 		.mask   = 0xf0ffffff,
2188 		.data	= &variant_nomadik,
2189 	},
2190 	{
2191 		.id     = 0x00280180,
2192 		.mask   = 0x00ffffff,
2193 		.data	= &variant_nomadik,
2194 	},
2195 	{
2196 		.id     = 0x00480180,
2197 		.mask   = 0xf0ffffff,
2198 		.data	= &variant_ux500,
2199 	},
2200 	{
2201 		.id     = 0x10480180,
2202 		.mask   = 0xf0ffffff,
2203 		.data	= &variant_ux500v2,
2204 	},
2205 	{
2206 		.id     = 0x00880180,
2207 		.mask   = 0x00ffffff,
2208 		.data	= &variant_stm32,
2209 	},
2210 	{
2211 		.id     = 0x10153180,
2212 		.mask	= 0xf0ffffff,
2213 		.data	= &variant_stm32_sdmmc,
2214 	},
2215 	/* Qualcomm variants */
2216 	{
2217 		.id     = 0x00051180,
2218 		.mask	= 0x000fffff,
2219 		.data	= &variant_qcom,
2220 	},
2221 	{ 0, 0 },
2222 };
2223 
2224 MODULE_DEVICE_TABLE(amba, mmci_ids);
2225 
2226 static struct amba_driver mmci_driver = {
2227 	.drv		= {
2228 		.name	= DRIVER_NAME,
2229 		.pm	= &mmci_dev_pm_ops,
2230 	},
2231 	.probe		= mmci_probe,
2232 	.remove		= mmci_remove,
2233 	.id_table	= mmci_ids,
2234 };
2235 
2236 module_amba_driver(mmci_driver);
2237 
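/*
 * fmax serves as the fallback frequency ceiling in mmci_probe() when no
 * maximum operating frequency is supplied by platform data or DT.
 */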
2238 module_param(fmax, uint, 0444);
2239 
2240 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
2241 MODULE_LICENSE("GPL");
2242