xref: /openbmc/linux/drivers/mmc/host/dw_mmc.c (revision 8a10bc9d)
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13 
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/dw_mmc.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/workqueue.h>
37 #include <linux/of.h>
38 #include <linux/of_gpio.h>
39 #include <linux/mmc/slot-gpio.h>
40 
41 #include "dw_mmc.h"
42 
43 /* Common flag combinations */
44 #define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
45 				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
46 				 SDMMC_INT_EBE)
47 #define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 				 SDMMC_INT_RESP_ERR)
49 #define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
50 				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
51 #define DW_MCI_SEND_STATUS	1
52 #define DW_MCI_RECV_STATUS	2
53 #define DW_MCI_DMA_THRESHOLD	16
54 
55 #define DW_MCI_FREQ_MAX	200000000	/* unit: Hz */
56 #define DW_MCI_FREQ_MIN	400000		/* unit: Hz */
57 
58 #ifdef CONFIG_MMC_DW_IDMAC
59 #define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 				 SDMMC_IDMAC_INT_TI)
63 
64 struct idmac_desc {
65 	u32		des0;	/* Control Descriptor */
66 #define IDMAC_DES0_DIC	BIT(1)
67 #define IDMAC_DES0_LD	BIT(2)
68 #define IDMAC_DES0_FD	BIT(3)
69 #define IDMAC_DES0_CH	BIT(4)
70 #define IDMAC_DES0_ER	BIT(5)
71 #define IDMAC_DES0_CES	BIT(30)
72 #define IDMAC_DES0_OWN	BIT(31)
73 
74 	u32		des1;	/* Buffer sizes */
75 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
76 	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
77 
78 	u32		des2;	/* buffer 1 physical address */
79 
80 	u32		des3;	/* buffer 2 physical address */
81 };
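
/*
 * A note on the descriptor fields: IDMAC_SET_BUFFER1_SIZE() only writes
 * bits [12:0] of des1, so one descriptor covers at most 0x1fff (8191)
 * bytes. In the chained mode used here (IDMAC_DES0_CH), des3 holds the
 * physical address of the next descriptor rather than a second buffer.
 */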
82 #endif /* CONFIG_MMC_DW_IDMAC */
83 
84 static const u8 tuning_blk_pattern_4bit[] = {
85 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
93 };
94 
95 static const u8 tuning_blk_pattern_8bit[] = {
96 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
112 };
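
/*
 * These are the standard tuning block patterns a card returns in
 * response to CMD19/CMD21; dw_mci_execute_tuning() hands them to the
 * variant driver so it can compare received data against the expected
 * pattern while searching for a working sample phase.
 */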
113 
114 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116 
117 #if defined(CONFIG_DEBUG_FS)
118 static int dw_mci_req_show(struct seq_file *s, void *v)
119 {
120 	struct dw_mci_slot *slot = s->private;
121 	struct mmc_request *mrq;
122 	struct mmc_command *cmd;
123 	struct mmc_command *stop;
124 	struct mmc_data	*data;
125 
126 	/* Make sure we get a consistent snapshot */
127 	spin_lock_bh(&slot->host->lock);
128 	mrq = slot->mrq;
129 
130 	if (mrq) {
131 		cmd = mrq->cmd;
132 		data = mrq->data;
133 		stop = mrq->stop;
134 
135 		if (cmd)
136 			seq_printf(s,
137 				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 				   cmd->opcode, cmd->arg, cmd->flags,
139 				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 				   cmd->resp[3], cmd->error);
141 		if (data)
142 			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 				   data->bytes_xfered, data->blocks,
144 				   data->blksz, data->flags, data->error);
145 		if (stop)
146 			seq_printf(s,
147 				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 				   stop->opcode, stop->arg, stop->flags,
149 				   stop->resp[0], stop->resp[1], stop->resp[2],
150 				   stop->resp[3], stop->error);
151 	}
152 
153 	spin_unlock_bh(&slot->host->lock);
154 
155 	return 0;
156 }
157 
158 static int dw_mci_req_open(struct inode *inode, struct file *file)
159 {
160 	return single_open(file, dw_mci_req_show, inode->i_private);
161 }
162 
163 static const struct file_operations dw_mci_req_fops = {
164 	.owner		= THIS_MODULE,
165 	.open		= dw_mci_req_open,
166 	.read		= seq_read,
167 	.llseek		= seq_lseek,
168 	.release	= single_release,
169 };
170 
171 static int dw_mci_regs_show(struct seq_file *s, void *v)
172 {
	struct dw_mci *host = s->private;

173 	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
174 	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
175 	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
176 	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
177 	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
178 	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
179 
180 	return 0;
181 }
182 
183 static int dw_mci_regs_open(struct inode *inode, struct file *file)
184 {
185 	return single_open(file, dw_mci_regs_show, inode->i_private);
186 }
187 
188 static const struct file_operations dw_mci_regs_fops = {
189 	.owner		= THIS_MODULE,
190 	.open		= dw_mci_regs_open,
191 	.read		= seq_read,
192 	.llseek		= seq_lseek,
193 	.release	= single_release,
194 };
195 
196 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197 {
198 	struct mmc_host	*mmc = slot->mmc;
199 	struct dw_mci *host = slot->host;
200 	struct dentry *root;
201 	struct dentry *node;
202 
203 	root = mmc->debugfs_root;
204 	if (!root)
205 		return;
206 
207 	node = debugfs_create_file("regs", S_IRUSR, root, host,
208 				   &dw_mci_regs_fops);
209 	if (!node)
210 		goto err;
211 
212 	node = debugfs_create_file("req", S_IRUSR, root, slot,
213 				   &dw_mci_req_fops);
214 	if (!node)
215 		goto err;
216 
217 	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 	if (!node)
219 		goto err;
220 
221 	node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 				  (u32 *)&host->pending_events);
223 	if (!node)
224 		goto err;
225 
226 	node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 				  (u32 *)&host->completed_events);
228 	if (!node)
229 		goto err;
230 
231 	return;
232 
233 err:
234 	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235 }
236 #endif /* defined(CONFIG_DEBUG_FS) */
237 
238 static void dw_mci_set_timeout(struct dw_mci *host)
239 {
240 	/* timeout (maximum) */
241 	mci_writel(host, TMOUT, 0xffffffff);
242 }
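
/*
 * TMOUT packs two fields (per the DesignWare databook): bits [7:0] are
 * the response timeout and bits [31:8] the data timeout, both counted
 * in card clock cycles, so 0xffffffff simply maxes out both.
 */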
243 
244 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
245 {
246 	struct mmc_data	*data;
247 	struct dw_mci_slot *slot = mmc_priv(mmc);
248 	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
249 	u32 cmdr;
250 	cmd->error = -EINPROGRESS;
251 
252 	cmdr = cmd->opcode;
253 
254 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
255 	    cmd->opcode == MMC_GO_IDLE_STATE ||
256 	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
257 	    (cmd->opcode == SD_IO_RW_DIRECT &&
258 	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
259 		cmdr |= SDMMC_CMD_STOP;
260 	else
261 		if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
262 			cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
263 
264 	if (cmd->flags & MMC_RSP_PRESENT) {
265 		/* We expect a response, so set this bit */
266 		cmdr |= SDMMC_CMD_RESP_EXP;
267 		if (cmd->flags & MMC_RSP_136)
268 			cmdr |= SDMMC_CMD_RESP_LONG;
269 	}
270 
271 	if (cmd->flags & MMC_RSP_CRC)
272 		cmdr |= SDMMC_CMD_RESP_CRC;
273 
274 	data = cmd->data;
275 	if (data) {
276 		cmdr |= SDMMC_CMD_DAT_EXP;
277 		if (data->flags & MMC_DATA_STREAM)
278 			cmdr |= SDMMC_CMD_STRM_MODE;
279 		if (data->flags & MMC_DATA_WRITE)
280 			cmdr |= SDMMC_CMD_DAT_WR;
281 	}
282 
283 	if (drv_data && drv_data->prepare_command)
284 		drv_data->prepare_command(slot->host, &cmdr);
285 
286 	return cmdr;
287 }
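
/*
 * Example: a single-block read (CMD17 with an R1 response) is encoded as
 * 17 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP | SDMMC_CMD_RESP_CRC |
 * SDMMC_CMD_DAT_EXP, before any drv_data->prepare_command() adjustments.
 */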
288 
289 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
290 {
291 	struct mmc_command *stop;
292 	u32 cmdr;
293 
294 	if (!cmd->data)
295 		return 0;
296 
297 	stop = &host->stop_abort;
298 	cmdr = cmd->opcode;
299 	memset(stop, 0, sizeof(struct mmc_command));
300 
301 	if (cmdr == MMC_READ_SINGLE_BLOCK ||
302 	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
303 	    cmdr == MMC_WRITE_BLOCK ||
304 	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
305 		stop->opcode = MMC_STOP_TRANSMISSION;
306 		stop->arg = 0;
307 		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
308 	} else if (cmdr == SD_IO_RW_EXTENDED) {
309 		stop->opcode = SD_IO_RW_DIRECT;
310 		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
311 			     ((cmd->arg >> 28) & 0x7);
312 		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
313 	} else {
314 		return 0;
315 	}
316 
317 	cmdr = stop->opcode | SDMMC_CMD_STOP |
318 		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
319 
320 	return cmdr;
321 }
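
/*
 * The CMD52 argument built above follows the SDIO argument layout:
 * bit 31 selects a write, bits [30:28] address function 0 (the CCCR),
 * bits [25:9] select the ABORT register, and the low bits carry the
 * number of the function being aborted, copied from bits [30:28] of
 * the original CMD53 argument.
 */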
322 
323 static void dw_mci_start_command(struct dw_mci *host,
324 				 struct mmc_command *cmd, u32 cmd_flags)
325 {
326 	host->cmd = cmd;
327 	dev_vdbg(host->dev,
328 		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
329 		 cmd->arg, cmd_flags);
330 
331 	mci_writel(host, CMDARG, cmd->arg);
332 	wmb();
333 
334 	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
335 }
336 
337 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
338 {
339 	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
340 	dw_mci_start_command(host, stop, host->stop_cmdr);
341 }
342 
343 /* DMA interface functions */
344 static void dw_mci_stop_dma(struct dw_mci *host)
345 {
346 	if (host->using_dma) {
347 		host->dma_ops->stop(host);
348 		host->dma_ops->cleanup(host);
349 	}
350 
351 	/* Data transfer was stopped by the interrupt handler */
352 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
353 }
354 
355 static int dw_mci_get_dma_dir(struct mmc_data *data)
356 {
357 	if (data->flags & MMC_DATA_WRITE)
358 		return DMA_TO_DEVICE;
359 	else
360 		return DMA_FROM_DEVICE;
361 }
362 
363 #ifdef CONFIG_MMC_DW_IDMAC
364 static void dw_mci_dma_cleanup(struct dw_mci *host)
365 {
366 	struct mmc_data *data = host->data;
367 
368 	if (data)
369 		if (!data->host_cookie)
370 			dma_unmap_sg(host->dev,
371 				     data->sg,
372 				     data->sg_len,
373 				     dw_mci_get_dma_dir(data));
374 }
375 
376 static void dw_mci_idmac_reset(struct dw_mci *host)
377 {
378 	u32 bmod = mci_readl(host, BMOD);
379 	/* Software reset of DMA */
380 	bmod |= SDMMC_IDMAC_SWRESET;
381 	mci_writel(host, BMOD, bmod);
382 }
383 
384 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
385 {
386 	u32 temp;
387 
388 	/* Disable and reset the IDMAC interface */
389 	temp = mci_readl(host, CTRL);
390 	temp &= ~SDMMC_CTRL_USE_IDMAC;
391 	temp |= SDMMC_CTRL_DMA_RESET;
392 	mci_writel(host, CTRL, temp);
393 
394 	/* Stop the IDMAC running */
395 	temp = mci_readl(host, BMOD);
396 	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
397 	temp |= SDMMC_IDMAC_SWRESET;
398 	mci_writel(host, BMOD, temp);
399 }
400 
401 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
402 {
403 	struct mmc_data *data = host->data;
404 
405 	dev_vdbg(host->dev, "DMA complete\n");
406 
407 	host->dma_ops->cleanup(host);
408 
409 	/*
410 	 * If the card was removed, data will be NULL. No point in trying to
411 	 * send the stop command or waiting for NBUSY in this case.
412 	 */
413 	if (data) {
414 		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
415 		tasklet_schedule(&host->tasklet);
416 	}
417 }
418 
419 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
420 				    unsigned int sg_len)
421 {
422 	int i;
423 	struct idmac_desc *desc = host->sg_cpu;
424 
425 	for (i = 0; i < sg_len; i++, desc++) {
426 		unsigned int length = sg_dma_len(&data->sg[i]);
427 		u32 mem_addr = sg_dma_address(&data->sg[i]);
428 
429 		/* Set the OWN bit and disable interrupts for this descriptor */
430 		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
431 
432 		/* Buffer length */
433 		IDMAC_SET_BUFFER1_SIZE(desc, length);
434 
435 		/* Physical address to DMA to/from */
436 		desc->des2 = mem_addr;
437 	}
438 
439 	/* Set first descriptor */
440 	desc = host->sg_cpu;
441 	desc->des0 |= IDMAC_DES0_FD;
442 
443 	/* Set last descriptor */
444 	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
445 	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
446 	desc->des0 |= IDMAC_DES0_LD;
447 
448 	wmb();
449 }
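
/*
 * Resulting descriptor chain: every entry is chained (CH) with its
 * completion interrupt suppressed (DIC); the first entry is then tagged
 * FD and the last one LD with DIC cleared, so the IDMAC raises a single
 * interrupt when the final buffer completes.
 */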
450 
451 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
452 {
453 	u32 temp;
454 
455 	dw_mci_translate_sglist(host, host->data, sg_len);
456 
457 	/* Select IDMAC interface */
458 	temp = mci_readl(host, CTRL);
459 	temp |= SDMMC_CTRL_USE_IDMAC;
460 	mci_writel(host, CTRL, temp);
461 
462 	wmb();
463 
464 	/* Enable the IDMAC */
465 	temp = mci_readl(host, BMOD);
466 	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
467 	mci_writel(host, BMOD, temp);
468 
469 	/* Start it running */
470 	mci_writel(host, PLDMND, 1);
471 }
472 
473 static int dw_mci_idmac_init(struct dw_mci *host)
474 {
475 	struct idmac_desc *p;
476 	int i;
477 
478 	/* Number of descriptors in the ring buffer */
479 	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
480 
481 	/* Forward link the descriptor list */
482 	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
483 		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
484 
485 	/* Set the last descriptor as the end-of-ring descriptor */
486 	p->des3 = host->sg_dma;
487 	p->des0 = IDMAC_DES0_ER;
488 
489 	dw_mci_idmac_reset(host);
490 
491 	/* Mask out interrupts - get Tx & Rx complete only */
492 	mci_writel(host, IDSTS, IDMAC_INT_CLR);
493 	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
494 		   SDMMC_IDMAC_INT_TI);
495 
496 	/* Set the descriptor base address */
497 	mci_writel(host, DBADDR, host->sg_dma);
498 	return 0;
499 }
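
/*
 * Ring geometry: with 4 KiB pages and 16-byte descriptors this yields a
 * 256-entry ring. The last entry carries IDMAC_DES0_ER and points des3
 * back at host->sg_dma, so the engine wraps to the base programmed in
 * DBADDR instead of running off the end of the page.
 */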
500 
501 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
502 	.init = dw_mci_idmac_init,
503 	.start = dw_mci_idmac_start_dma,
504 	.stop = dw_mci_idmac_stop_dma,
505 	.complete = dw_mci_idmac_complete_dma,
506 	.cleanup = dw_mci_dma_cleanup,
507 };
508 #endif /* CONFIG_MMC_DW_IDMAC */
509 
510 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
511 				   struct mmc_data *data,
512 				   bool next)
513 {
514 	struct scatterlist *sg;
515 	unsigned int i, sg_len;
516 
517 	if (!next && data->host_cookie)
518 		return data->host_cookie;
519 
520 	/*
521 	 * We don't do DMA on "complex" transfers, i.e. with
522 	 * non-word-aligned buffers or lengths. Also, we don't bother
523 	 * with all the DMA setup overhead for short transfers.
524 	 */
525 	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
526 		return -EINVAL;
527 
528 	if (data->blksz & 3)
529 		return -EINVAL;
530 
531 	for_each_sg(data->sg, sg, data->sg_len, i) {
532 		if (sg->offset & 3 || sg->length & 3)
533 			return -EINVAL;
534 	}
535 
536 	sg_len = dma_map_sg(host->dev,
537 			    data->sg,
538 			    data->sg_len,
539 			    dw_mci_get_dma_dir(data));
540 	if (sg_len == 0)
541 		return -EINVAL;
542 
543 	if (next)
544 		data->host_cookie = sg_len;
545 
546 	return sg_len;
547 }
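
/*
 * In short: DMA is only attempted when the transfer is at least
 * DW_MCI_DMA_THRESHOLD (16) bytes and every scatterlist entry is 32-bit
 * aligned in both offset and length; everything else falls back to PIO
 * through the sg_miter path set up in dw_mci_submit_data().
 */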
548 
549 static void dw_mci_pre_req(struct mmc_host *mmc,
550 			   struct mmc_request *mrq,
551 			   bool is_first_req)
552 {
553 	struct dw_mci_slot *slot = mmc_priv(mmc);
554 	struct mmc_data *data = mrq->data;
555 
556 	if (!slot->host->use_dma || !data)
557 		return;
558 
559 	if (data->host_cookie) {
560 		data->host_cookie = 0;
561 		return;
562 	}
563 
564 	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
565 		data->host_cookie = 0;
566 }
567 
568 static void dw_mci_post_req(struct mmc_host *mmc,
569 			    struct mmc_request *mrq,
570 			    int err)
571 {
572 	struct dw_mci_slot *slot = mmc_priv(mmc);
573 	struct mmc_data *data = mrq->data;
574 
575 	if (!slot->host->use_dma || !data)
576 		return;
577 
578 	if (data->host_cookie)
579 		dma_unmap_sg(slot->host->dev,
580 			     data->sg,
581 			     data->sg_len,
582 			     dw_mci_get_dma_dir(data));
583 	data->host_cookie = 0;
584 }
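
/*
 * host_cookie protocol: dw_mci_pre_req() maps the scatterlist ahead of
 * time and stashes the mapped length in data->host_cookie,
 * dw_mci_pre_dma_transfer() reuses that mapping on the !next path, and
 * dw_mci_post_req() unmaps it and clears the cookie when the request
 * finishes.
 */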
585 
586 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
587 {
588 #ifdef CONFIG_MMC_DW_IDMAC
589 	unsigned int blksz = data->blksz;
590 	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
591 	u32 fifo_width = 1 << host->data_shift;
592 	u32 blksz_depth = blksz / fifo_width, fifoth_val;
593 	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
594 	int idx = ARRAY_SIZE(mszs) - 1;
595 
596 	tx_wmark = (host->fifo_depth) / 2;
597 	tx_wmark_invers = host->fifo_depth - tx_wmark;
598 
599 	/*
600 	 * MSIZE is '1' if blksz is not a multiple
601 	 * of the FIFO width.
602 	 */
603 	if (blksz % fifo_width) {
604 		msize = 0;
605 		rx_wmark = 1;
606 		goto done;
607 	}
608 
609 	do {
610 		if (!((blksz_depth % mszs[idx]) ||
611 		     (tx_wmark_invers % mszs[idx]))) {
612 			msize = idx;
613 			rx_wmark = mszs[idx] - 1;
614 			break;
615 		}
616 	} while (--idx > 0);
617 	/*
618 	 * If idx reaches '0', the loop exits without a match,
619 	 * so the initial values above are used.
620 	 */
621 done:
622 	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
623 	mci_writel(host, FIFOTH, fifoth_val);
624 #endif
625 }
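
/*
 * Worked example, assuming a 32-word FIFO with a 4-byte width
 * (data_shift == 2): a 512-byte block gives blksz_depth = 128 and
 * tx_wmark = tx_wmark_invers = 16, so the loop above settles on
 * mszs[3] = 16, i.e. an MSIZE of 16 transfers with an RX watermark of
 * 15 and a TX watermark of 16.
 */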
626 
627 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
628 {
629 	unsigned int blksz = data->blksz;
630 	u32 blksz_depth, fifo_depth;
631 	u16 thld_size;
632 
633 	WARN_ON(!(data->flags & MMC_DATA_READ));
634 
635 	if (host->timing != MMC_TIMING_MMC_HS200 &&
636 	    host->timing != MMC_TIMING_UHS_SDR104)
637 		goto disable;
638 
639 	blksz_depth = blksz / (1 << host->data_shift);
640 	fifo_depth = host->fifo_depth;
641 
642 	if (blksz_depth > fifo_depth)
643 		goto disable;
644 
645 	/*
646 	 * If (blksz_depth) >= (fifo_depth >> 1), thld_size should satisfy 'thld_size <= blksz'.
647 	 * If (blksz_depth) <  (fifo_depth >> 1), thld_size should equal blksz.
648 	 * Currently just choose blksz.
649 	 */
650 	thld_size = blksz;
651 	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
652 	return;
653 
654 disable:
655 	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
656 }
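
/*
 * The card read threshold makes the controller start a block read only
 * when the FIFO can absorb thld_size bytes, which is meant to avoid RX
 * overruns in HS200/SDR104 modes where the card clock cannot be stopped
 * in the middle of a block.
 */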
657 
658 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
659 {
660 	int sg_len;
661 	u32 temp;
662 
663 	host->using_dma = 0;
664 
665 	/* If we don't have a channel, we can't do DMA */
666 	if (!host->use_dma)
667 		return -ENODEV;
668 
669 	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
670 	if (sg_len < 0) {
671 		host->dma_ops->stop(host);
672 		return sg_len;
673 	}
674 
675 	host->using_dma = 1;
676 
677 	dev_vdbg(host->dev,
678 		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
679 		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
680 		 sg_len);
681 
682 	/*
683 	 * Decide the MSIZE and RX/TX Watermark.
684 	 * If the current block size is the same as the previous one,
685 	 * there is no need to update FIFOTH.
686 	 */
687 	if (host->prev_blksz != data->blksz)
688 		dw_mci_adjust_fifoth(host, data);
689 
690 	/* Enable the DMA interface */
691 	temp = mci_readl(host, CTRL);
692 	temp |= SDMMC_CTRL_DMA_ENABLE;
693 	mci_writel(host, CTRL, temp);
694 
695 	/* Disable RX/TX IRQs, let DMA handle it */
696 	temp = mci_readl(host, INTMASK);
697 	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
698 	mci_writel(host, INTMASK, temp);
699 
700 	host->dma_ops->start(host, sg_len);
701 
702 	return 0;
703 }
704 
705 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
706 {
707 	u32 temp;
708 
709 	data->error = -EINPROGRESS;
710 
711 	WARN_ON(host->data);
712 	host->sg = NULL;
713 	host->data = data;
714 
715 	if (data->flags & MMC_DATA_READ) {
716 		host->dir_status = DW_MCI_RECV_STATUS;
717 		dw_mci_ctrl_rd_thld(host, data);
718 	} else {
719 		host->dir_status = DW_MCI_SEND_STATUS;
720 	}
721 
722 	if (dw_mci_submit_data_dma(host, data)) {
723 		int flags = SG_MITER_ATOMIC;
724 		if (host->data->flags & MMC_DATA_READ)
725 			flags |= SG_MITER_TO_SG;
726 		else
727 			flags |= SG_MITER_FROM_SG;
728 
729 		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
730 		host->sg = data->sg;
731 		host->part_buf_start = 0;
732 		host->part_buf_count = 0;
733 
734 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
735 		temp = mci_readl(host, INTMASK);
736 		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
737 		mci_writel(host, INTMASK, temp);
738 
739 		temp = mci_readl(host, CTRL);
740 		temp &= ~SDMMC_CTRL_DMA_ENABLE;
741 		mci_writel(host, CTRL, temp);
742 
743 		/*
744 		 * Use the initial fifoth_val for PIO mode.
745 		 * Since the next transfer may be handled in DMA mode,
746 		 * prev_blksz must be invalidated.
747 		 */
748 		mci_writel(host, FIFOTH, host->fifoth_val);
749 		host->prev_blksz = 0;
750 	} else {
751 		/*
752 		 * Keep the current block size.
753 		 * It will be used to decide whether to update
754 		 * fifoth register next time.
755 		 */
756 		host->prev_blksz = data->blksz;
757 	}
758 }
759 
760 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
761 {
762 	struct dw_mci *host = slot->host;
763 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
764 	unsigned int cmd_status = 0;
765 
766 	mci_writel(host, CMDARG, arg);
767 	wmb();
768 	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
769 
770 	while (time_before(jiffies, timeout)) {
771 		cmd_status = mci_readl(host, CMD);
772 		if (!(cmd_status & SDMMC_CMD_START))
773 			return;
774 	}
775 	dev_err(&slot->mmc->class_dev,
776 		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
777 		cmd, arg, cmd_status);
778 }
779 
780 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
781 {
782 	struct dw_mci *host = slot->host;
783 	unsigned int clock = slot->clock;
784 	u32 div;
785 	u32 clk_en_a;
786 
787 	if (!clock) {
788 		mci_writel(host, CLKENA, 0);
789 		mci_send_cmd(slot,
790 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
791 	} else if (clock != host->current_speed || force_clkinit) {
792 		div = host->bus_hz / clock;
793 		if (host->bus_hz % clock && host->bus_hz > clock)
794 			/*
795 			 * move the + 1 after the divide to prevent
796 			 * over-clocking the card.
797 			 */
798 			div += 1;
799 
800 		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
801 
802 		if ((clock << div) != slot->__clk_old || force_clkinit)
803 			dev_info(&slot->mmc->class_dev,
804 				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
805 				 slot->id, host->bus_hz, clock,
806 				 div ? ((host->bus_hz / div) >> 1) :
807 				 host->bus_hz, div);
808 
809 		/* disable clock */
810 		mci_writel(host, CLKENA, 0);
811 		mci_writel(host, CLKSRC, 0);
812 
813 		/* inform CIU */
814 		mci_send_cmd(slot,
815 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816 
817 		/* set clock to desired speed */
818 		mci_writel(host, CLKDIV, div);
819 
820 		/* inform CIU */
821 		mci_send_cmd(slot,
822 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
823 
824 		/* enable clock; only low power if no SDIO */
825 		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
826 		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
827 			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
828 		mci_writel(host, CLKENA, clk_en_a);
829 
830 		/* inform CIU */
831 		mci_send_cmd(slot,
832 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
833 
834 		/* keep the clock with the clock divider reflected */
835 		slot->__clk_old = clock << div;
836 	}
837 
838 	host->current_speed = clock;
839 
840 	/* Set the current slot bus width */
841 	mci_writel(host, CTYPE, (slot->ctype << slot->id));
842 }
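
/*
 * Divider example: with bus_hz = 100 MHz and a requested 400 kHz clock,
 * div is 250 before halving and CLKDIV is programmed with 125; the
 * divider output is bus_hz / (2 * CLKDIV), i.e. exactly 400 kHz. A
 * CLKDIV of 0 passes bus_hz straight through, which is why the
 * bus_hz == clock case short-circuits to 0 above.
 */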
843 
844 static void __dw_mci_start_request(struct dw_mci *host,
845 				   struct dw_mci_slot *slot,
846 				   struct mmc_command *cmd)
847 {
848 	struct mmc_request *mrq;
849 	struct mmc_data	*data;
850 	u32 cmdflags;
851 
852 	mrq = slot->mrq;
853 	if (host->pdata->select_slot)
854 		host->pdata->select_slot(slot->id);
855 
856 	host->cur_slot = slot;
857 	host->mrq = mrq;
858 
859 	host->pending_events = 0;
860 	host->completed_events = 0;
861 	host->cmd_status = 0;
862 	host->data_status = 0;
863 	host->dir_status = 0;
864 
865 	data = cmd->data;
866 	if (data) {
867 		dw_mci_set_timeout(host);
868 		mci_writel(host, BYTCNT, data->blksz*data->blocks);
869 		mci_writel(host, BLKSIZ, data->blksz);
870 	}
871 
872 	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
873 
874 	/* this is the first command, send the initialization clock */
875 	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
876 		cmdflags |= SDMMC_CMD_INIT;
877 
878 	if (data) {
879 		dw_mci_submit_data(host, data);
880 		wmb();
881 	}
882 
883 	dw_mci_start_command(host, cmd, cmdflags);
884 
885 	if (mrq->stop)
886 		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
887 	else
888 		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
889 }
890 
891 static void dw_mci_start_request(struct dw_mci *host,
892 				 struct dw_mci_slot *slot)
893 {
894 	struct mmc_request *mrq = slot->mrq;
895 	struct mmc_command *cmd;
896 
897 	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
898 	__dw_mci_start_request(host, slot, cmd);
899 }
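
/*
 * If the core attached a set-block-count command (mrq->sbc, CMD23), it
 * is issued first; the STATE_SENDING_CMD handler in the tasklet then
 * starts mrq->cmd once the sbc completes without error.
 */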
900 
901 /* must be called with host->lock held */
902 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
903 				 struct mmc_request *mrq)
904 {
905 	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
906 		 host->state);
907 
908 	slot->mrq = mrq;
909 
910 	if (host->state == STATE_IDLE) {
911 		host->state = STATE_SENDING_CMD;
912 		dw_mci_start_request(host, slot);
913 	} else {
914 		list_add_tail(&slot->queue_node, &host->queue);
915 	}
916 }
917 
918 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
919 {
920 	struct dw_mci_slot *slot = mmc_priv(mmc);
921 	struct dw_mci *host = slot->host;
922 
923 	WARN_ON(slot->mrq);
924 
925 	/*
926 	 * The check for card presence and queueing of the request must be
927 	 * atomic, otherwise the card could be removed in between and the
928 	 * request wouldn't fail until another card was inserted.
929 	 */
930 	spin_lock_bh(&host->lock);
931 
932 	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
933 		spin_unlock_bh(&host->lock);
934 		mrq->cmd->error = -ENOMEDIUM;
935 		mmc_request_done(mmc, mrq);
936 		return;
937 	}
938 
939 	dw_mci_queue_request(host, slot, mrq);
940 
941 	spin_unlock_bh(&host->lock);
942 }
943 
944 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
945 {
946 	struct dw_mci_slot *slot = mmc_priv(mmc);
947 	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
948 	u32 regs;
949 
950 	switch (ios->bus_width) {
951 	case MMC_BUS_WIDTH_4:
952 		slot->ctype = SDMMC_CTYPE_4BIT;
953 		break;
954 	case MMC_BUS_WIDTH_8:
955 		slot->ctype = SDMMC_CTYPE_8BIT;
956 		break;
957 	default:
958 		/* set default 1 bit mode */
959 		slot->ctype = SDMMC_CTYPE_1BIT;
960 	}
961 
962 	regs = mci_readl(slot->host, UHS_REG);
963 
964 	/* DDR mode set */
965 	if (ios->timing == MMC_TIMING_UHS_DDR50)
966 		regs |= ((0x1 << slot->id) << 16);
967 	else
968 		regs &= ~((0x1 << slot->id) << 16);
969 
970 	mci_writel(slot->host, UHS_REG, regs);
971 	slot->host->timing = ios->timing;
972 
973 	/*
974 	 * Use mirror of ios->clock to prevent race with mmc
975 	 * core ios update when finding the minimum.
976 	 */
977 	slot->clock = ios->clock;
978 
979 	if (drv_data && drv_data->set_ios)
980 		drv_data->set_ios(slot->host, ios);
981 
982 	/* Slot specific timing and width adjustment */
983 	dw_mci_setup_bus(slot, false);
984 
985 	switch (ios->power_mode) {
986 	case MMC_POWER_UP:
987 		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
988 		/* Power up slot */
989 		if (slot->host->pdata->setpower)
990 			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
991 		regs = mci_readl(slot->host, PWREN);
992 		regs |= (1 << slot->id);
993 		mci_writel(slot->host, PWREN, regs);
994 		break;
995 	case MMC_POWER_OFF:
996 		/* Power down slot */
997 		if (slot->host->pdata->setpower)
998 			slot->host->pdata->setpower(slot->id, 0);
999 		regs = mci_readl(slot->host, PWREN);
1000 		regs &= ~(1 << slot->id);
1001 		mci_writel(slot->host, PWREN, regs);
1002 		break;
1003 	default:
1004 		break;
1005 	}
1006 }
1007 
1008 static int dw_mci_get_ro(struct mmc_host *mmc)
1009 {
1010 	int read_only;
1011 	struct dw_mci_slot *slot = mmc_priv(mmc);
1012 	struct dw_mci_board *brd = slot->host->pdata;
1013 
1014 	/* Use the platform get_ro function, else try the on-board write protect */
1015 	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1016 		read_only = 0;
1017 	else if (brd->get_ro)
1018 		read_only = brd->get_ro(slot->id);
1019 	else if (gpio_is_valid(slot->wp_gpio))
1020 		read_only = gpio_get_value(slot->wp_gpio);
1021 	else
1022 		read_only =
1023 			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1024 
1025 	dev_dbg(&mmc->class_dev, "card is %s\n",
1026 		read_only ? "read-only" : "read-write");
1027 
1028 	return read_only;
1029 }
1030 
1031 static int dw_mci_get_cd(struct mmc_host *mmc)
1032 {
1033 	int present;
1034 	struct dw_mci_slot *slot = mmc_priv(mmc);
1035 	struct dw_mci_board *brd = slot->host->pdata;
1036 	struct dw_mci *host = slot->host;
1037 	int gpio_cd = mmc_gpio_get_cd(mmc);
1038 
1039 	/* Use platform get_cd function, else try onboard card detect */
1040 	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1041 		present = 1;
1042 	else if (brd->get_cd)
1043 		present = !brd->get_cd(slot->id);
1044 	else if (!IS_ERR_VALUE(gpio_cd))
1045 		present = gpio_cd;
1046 	else
1047 		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1048 			== 0 ? 1 : 0;
1049 
1050 	spin_lock_bh(&host->lock);
1051 	if (present) {
1052 		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1053 		dev_dbg(&mmc->class_dev, "card is present\n");
1054 	} else {
1055 		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1056 		dev_dbg(&mmc->class_dev, "card is not present\n");
1057 	}
1058 	spin_unlock_bh(&host->lock);
1059 
1060 	return present;
1061 }
1062 
1063 /*
1064  * Disable low power mode.
1065  *
1066  * Low power mode will stop the card clock when idle.  According to the
1067  * description of the CLKENA register we should disable low power mode
1068  * for SDIO cards if we need SDIO interrupts to work.
1069  *
1070  * This function is fast if low power mode is already disabled.
1071  */
1072 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1073 {
1074 	struct dw_mci *host = slot->host;
1075 	u32 clk_en_a;
1076 	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1077 
1078 	clk_en_a = mci_readl(host, CLKENA);
1079 
1080 	if (clk_en_a & clken_low_pwr) {
1081 		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1082 		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1083 			     SDMMC_CMD_PRV_DAT_WAIT, 0);
1084 	}
1085 }
1086 
1087 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1088 {
1089 	struct dw_mci_slot *slot = mmc_priv(mmc);
1090 	struct dw_mci *host = slot->host;
1091 	u32 int_mask;
1092 
1093 	/* Enable/disable Slot Specific SDIO interrupt */
1094 	int_mask = mci_readl(host, INTMASK);
1095 	if (enb) {
1096 		/*
1097 		 * Turn off low power mode if it was enabled.  This is a bit of
1098 		 * a heavy operation and we disable / enable IRQs a lot, so
1099 		 * we'll leave low power mode disabled and it will get
1100 		 * re-enabled again in dw_mci_setup_bus().
1101 		 */
1102 		dw_mci_disable_low_power(slot);
1103 
1104 		mci_writel(host, INTMASK,
1105 			   (int_mask | SDMMC_INT_SDIO(slot->id)));
1106 	} else {
1107 		mci_writel(host, INTMASK,
1108 			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1109 	}
1110 }
1111 
1112 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1113 {
1114 	struct dw_mci_slot *slot = mmc_priv(mmc);
1115 	struct dw_mci *host = slot->host;
1116 	const struct dw_mci_drv_data *drv_data = host->drv_data;
1117 	struct dw_mci_tuning_data tuning_data;
1118 	int err = -ENOSYS;
1119 
1120 	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1121 		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1122 			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1123 			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1124 		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1125 			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1126 			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1127 		} else {
1128 			return -EINVAL;
1129 		}
1130 	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
1131 		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1132 		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1133 	} else {
1134 		dev_err(host->dev,
1135 			"Undefined command(%d) for tuning\n", opcode);
1136 		return -EINVAL;
1137 	}
1138 
1139 	if (drv_data && drv_data->execute_tuning)
1140 		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1141 	return err;
1142 }
1143 
1144 static const struct mmc_host_ops dw_mci_ops = {
1145 	.request		= dw_mci_request,
1146 	.pre_req		= dw_mci_pre_req,
1147 	.post_req		= dw_mci_post_req,
1148 	.set_ios		= dw_mci_set_ios,
1149 	.get_ro			= dw_mci_get_ro,
1150 	.get_cd			= dw_mci_get_cd,
1151 	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1152 	.execute_tuning		= dw_mci_execute_tuning,
1153 };
1154 
1155 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1156 	__releases(&host->lock)
1157 	__acquires(&host->lock)
1158 {
1159 	struct dw_mci_slot *slot;
1160 	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
1161 
1162 	WARN_ON(host->cmd || host->data);
1163 
1164 	host->cur_slot->mrq = NULL;
1165 	host->mrq = NULL;
1166 	if (!list_empty(&host->queue)) {
1167 		slot = list_entry(host->queue.next,
1168 				  struct dw_mci_slot, queue_node);
1169 		list_del(&slot->queue_node);
1170 		dev_vdbg(host->dev, "list not empty: %s is next\n",
1171 			 mmc_hostname(slot->mmc));
1172 		host->state = STATE_SENDING_CMD;
1173 		dw_mci_start_request(host, slot);
1174 	} else {
1175 		dev_vdbg(host->dev, "list empty\n");
1176 		host->state = STATE_IDLE;
1177 	}
1178 
1179 	spin_unlock(&host->lock);
1180 	mmc_request_done(prev_mmc, mrq);
1181 	spin_lock(&host->lock);
1182 }
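
/*
 * host->lock is dropped around mmc_request_done() (hence the sparse
 * annotations above) because the completion callback may queue the next
 * request immediately, which would otherwise recurse into this driver
 * while the lock is still held.
 */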
1183 
1184 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1185 {
1186 	u32 status = host->cmd_status;
1187 
1188 	host->cmd_status = 0;
1189 
1190 	/* Read the response from the card (up to 16 bytes) */
1191 	if (cmd->flags & MMC_RSP_PRESENT) {
1192 		if (cmd->flags & MMC_RSP_136) {
1193 			cmd->resp[3] = mci_readl(host, RESP0);
1194 			cmd->resp[2] = mci_readl(host, RESP1);
1195 			cmd->resp[1] = mci_readl(host, RESP2);
1196 			cmd->resp[0] = mci_readl(host, RESP3);
1197 		} else {
1198 			cmd->resp[0] = mci_readl(host, RESP0);
1199 			cmd->resp[1] = 0;
1200 			cmd->resp[2] = 0;
1201 			cmd->resp[3] = 0;
1202 		}
1203 	}
1204 
1205 	if (status & SDMMC_INT_RTO)
1206 		cmd->error = -ETIMEDOUT;
1207 	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1208 		cmd->error = -EILSEQ;
1209 	else if (status & SDMMC_INT_RESP_ERR)
1210 		cmd->error = -EIO;
1211 	else
1212 		cmd->error = 0;
1213 
1214 	if (cmd->error) {
1215 		/* newer ip versions need a delay between retries */
1216 		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1217 			mdelay(20);
1218 	}
1219 
1220 	return cmd->error;
1221 }
1222 
1223 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1224 {
1225 	u32 status = host->data_status;
1226 
1227 	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1228 		if (status & SDMMC_INT_DRTO) {
1229 			data->error = -ETIMEDOUT;
1230 		} else if (status & SDMMC_INT_DCRC) {
1231 			data->error = -EILSEQ;
1232 		} else if (status & SDMMC_INT_EBE) {
1233 			if (host->dir_status ==
1234 				DW_MCI_SEND_STATUS) {
1235 				/*
1236 				 * No data CRC status was returned.
1237 				 * The number of bytes transferred
1238 				 * will be exaggerated in PIO mode.
1239 				 */
1240 				data->bytes_xfered = 0;
1241 				data->error = -ETIMEDOUT;
1242 			} else if (host->dir_status ==
1243 					DW_MCI_RECV_STATUS) {
1244 				data->error = -EIO;
1245 			}
1246 		} else {
1247 			/* SDMMC_INT_SBE is included */
1248 			data->error = -EIO;
1249 		}
1250 
1251 		dev_err(host->dev, "data error, status 0x%08x\n", status);
1252 
1253 		/*
1254 		 * After an error, there may be data lingering
1255 		 * in the FIFO
1256 		 */
1257 		dw_mci_fifo_reset(host);
1258 	} else {
1259 		data->bytes_xfered = data->blocks * data->blksz;
1260 		data->error = 0;
1261 	}
1262 
1263 	return data->error;
1264 }
1265 
1266 static void dw_mci_tasklet_func(unsigned long priv)
1267 {
1268 	struct dw_mci *host = (struct dw_mci *)priv;
1269 	struct mmc_data	*data;
1270 	struct mmc_command *cmd;
1271 	struct mmc_request *mrq;
1272 	enum dw_mci_state state;
1273 	enum dw_mci_state prev_state;
1274 	unsigned int err;
1275 
1276 	spin_lock(&host->lock);
1277 
1278 	state = host->state;
1279 	data = host->data;
1280 	mrq = host->mrq;
1281 
1282 	do {
1283 		prev_state = state;
1284 
1285 		switch (state) {
1286 		case STATE_IDLE:
1287 			break;
1288 
1289 		case STATE_SENDING_CMD:
1290 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1291 						&host->pending_events))
1292 				break;
1293 
1294 			cmd = host->cmd;
1295 			host->cmd = NULL;
1296 			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1297 			err = dw_mci_command_complete(host, cmd);
1298 			if (cmd == mrq->sbc && !err) {
1299 				prev_state = state = STATE_SENDING_CMD;
1300 				__dw_mci_start_request(host, host->cur_slot,
1301 						       mrq->cmd);
1302 				goto unlock;
1303 			}
1304 
1305 			if (cmd->data && err) {
1306 				dw_mci_stop_dma(host);
1307 				send_stop_abort(host, data);
1308 				state = STATE_SENDING_STOP;
1309 				break;
1310 			}
1311 
1312 			if (!cmd->data || err) {
1313 				dw_mci_request_end(host, mrq);
1314 				goto unlock;
1315 			}
1316 
1317 			prev_state = state = STATE_SENDING_DATA;
1318 			/* fall through */
1319 
1320 		case STATE_SENDING_DATA:
1321 			if (test_and_clear_bit(EVENT_DATA_ERROR,
1322 					       &host->pending_events)) {
1323 				dw_mci_stop_dma(host);
1324 				send_stop_abort(host, data);
1325 				state = STATE_DATA_ERROR;
1326 				break;
1327 			}
1328 
1329 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1330 						&host->pending_events))
1331 				break;
1332 
1333 			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1334 			prev_state = state = STATE_DATA_BUSY;
1335 			/* fall through */
1336 
1337 		case STATE_DATA_BUSY:
1338 			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1339 						&host->pending_events))
1340 				break;
1341 
1342 			host->data = NULL;
1343 			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1344 			err = dw_mci_data_complete(host, data);
1345 
1346 			if (!err) {
1347 				if (!data->stop || mrq->sbc) {
1348 					if (mrq->sbc)
1349 						data->stop->error = 0;
1350 					dw_mci_request_end(host, mrq);
1351 					goto unlock;
1352 				}
1353 
1354 				/* stop command for open-ended transfer */
1355 				if (data->stop)
1356 					send_stop_abort(host, data);
1357 			}
1358 
1359 			/*
1360 			 * If err is non-zero, the stop/abort command
1361 			 * has already been issued.
1362 			 */
1363 			prev_state = state = STATE_SENDING_STOP;
1364 
1365 			/* fall through */
1366 
1367 		case STATE_SENDING_STOP:
1368 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1369 						&host->pending_events))
1370 				break;
1371 
1372 			/* CMD error in data command */
1373 			if (mrq->cmd->error && mrq->data)
1374 				dw_mci_fifo_reset(host);
1375 
1376 			host->cmd = NULL;
1377 			host->data = NULL;
1378 
1379 			if (mrq->stop)
1380 				dw_mci_command_complete(host, mrq->stop);
1381 			else
1382 				host->cmd_status = 0;
1383 
1384 			dw_mci_request_end(host, mrq);
1385 			goto unlock;
1386 
1387 		case STATE_DATA_ERROR:
1388 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1389 						&host->pending_events))
1390 				break;
1391 
1392 			state = STATE_DATA_BUSY;
1393 			break;
1394 		}
1395 	} while (state != prev_state);
1396 
1397 	host->state = state;
1398 unlock:
1399 	spin_unlock(&host->lock);
1400 
1401 }
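
/*
 * State machine sketch for a data command:
 *
 *   SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> (SENDING_STOP ->) IDLE
 *
 * SENDING_STOP is only entered for open-ended transfers or after an
 * error; DATA_ERROR is entered from SENDING_DATA when EVENT_DATA_ERROR
 * is pending, waits for the transfer to drain and rejoins at DATA_BUSY.
 * Commands without data complete straight out of SENDING_CMD.
 */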
1402 
1403 /* push final bytes to part_buf, only use during push */
1404 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1405 {
1406 	memcpy((void *)&host->part_buf, buf, cnt);
1407 	host->part_buf_count = cnt;
1408 }
1409 
1410 /* append bytes to part_buf, only use during push */
1411 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1412 {
1413 	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1414 	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1415 	host->part_buf_count += cnt;
1416 	return cnt;
1417 }
1418 
1419 /* pull first bytes from part_buf, only use during pull */
1420 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1421 {
1422 	cnt = min(cnt, (int)host->part_buf_count);
1423 	if (cnt) {
1424 		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1425 		       cnt);
1426 		host->part_buf_count -= cnt;
1427 		host->part_buf_start += cnt;
1428 	}
1429 	return cnt;
1430 }
1431 
1432 /* pull final bytes from the part_buf, assuming it's just been filled */
1433 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1434 {
1435 	memcpy(buf, &host->part_buf, cnt);
1436 	host->part_buf_start = cnt;
1437 	host->part_buf_count = (1 << host->data_shift) - cnt;
1438 }
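
/*
 * part_buf is the staging area for residues smaller than the FIFO word
 * (2, 4 or 8 bytes depending on data_shift): the push helpers park
 * trailing bytes there until a full word accumulates, and the pull
 * helpers read one full word and hand out the leftover bytes on later
 * calls.
 */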
1439 
1440 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1441 {
1442 	struct mmc_data *data = host->data;
1443 	int init_cnt = cnt;
1444 
1445 	/* try and push anything in the part_buf */
1446 	if (unlikely(host->part_buf_count)) {
1447 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1448 		buf += len;
1449 		cnt -= len;
1450 		if (host->part_buf_count == 2) {
1451 			mci_writew(host, DATA(host->data_offset),
1452 					host->part_buf16);
1453 			host->part_buf_count = 0;
1454 		}
1455 	}
1456 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1457 	if (unlikely((unsigned long)buf & 0x1)) {
1458 		while (cnt >= 2) {
1459 			u16 aligned_buf[64];
1460 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1461 			int items = len >> 1;
1462 			int i;
1463 			/* memcpy from input buffer into aligned buffer */
1464 			memcpy(aligned_buf, buf, len);
1465 			buf += len;
1466 			cnt -= len;
1467 			/* push data from aligned buffer into fifo */
1468 			for (i = 0; i < items; ++i)
1469 				mci_writew(host, DATA(host->data_offset),
1470 						aligned_buf[i]);
1471 		}
1472 	} else
1473 #endif
1474 	{
1475 		u16 *pdata = buf;
1476 		for (; cnt >= 2; cnt -= 2)
1477 			mci_writew(host, DATA(host->data_offset), *pdata++);
1478 		buf = pdata;
1479 	}
1480 	/* put anything remaining in the part_buf */
1481 	if (cnt) {
1482 		dw_mci_set_part_bytes(host, buf, cnt);
1483 		 /* Push data if we have reached the expected data length */
1484 		if ((data->bytes_xfered + init_cnt) ==
1485 		    (data->blksz * data->blocks))
1486 			mci_writew(host, DATA(host->data_offset),
1487 				   host->part_buf16);
1488 	}
1489 }
1490 
1491 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1492 {
1493 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1494 	if (unlikely((unsigned long)buf & 0x1)) {
1495 		while (cnt >= 2) {
1496 			/* pull data from fifo into aligned buffer */
1497 			u16 aligned_buf[64];
1498 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1499 			int items = len >> 1;
1500 			int i;
1501 			for (i = 0; i < items; ++i)
1502 				aligned_buf[i] = mci_readw(host,
1503 						DATA(host->data_offset));
1504 			/* memcpy from aligned buffer into output buffer */
1505 			memcpy(buf, aligned_buf, len);
1506 			buf += len;
1507 			cnt -= len;
1508 		}
1509 	} else
1510 #endif
1511 	{
1512 		u16 *pdata = buf;
1513 		for (; cnt >= 2; cnt -= 2)
1514 			*pdata++ = mci_readw(host, DATA(host->data_offset));
1515 		buf = pdata;
1516 	}
1517 	if (cnt) {
1518 		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1519 		dw_mci_pull_final_bytes(host, buf, cnt);
1520 	}
1521 }
1522 
1523 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1524 {
1525 	struct mmc_data *data = host->data;
1526 	int init_cnt = cnt;
1527 
1528 	/* try and push anything in the part_buf */
1529 	if (unlikely(host->part_buf_count)) {
1530 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1531 		buf += len;
1532 		cnt -= len;
1533 		if (host->part_buf_count == 4) {
1534 			mci_writel(host, DATA(host->data_offset),
1535 					host->part_buf32);
1536 			host->part_buf_count = 0;
1537 		}
1538 	}
1539 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1540 	if (unlikely((unsigned long)buf & 0x3)) {
1541 		while (cnt >= 4) {
1542 			u32 aligned_buf[32];
1543 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1544 			int items = len >> 2;
1545 			int i;
1546 			/* memcpy from input buffer into aligned buffer */
1547 			memcpy(aligned_buf, buf, len);
1548 			buf += len;
1549 			cnt -= len;
1550 			/* push data from aligned buffer into fifo */
1551 			for (i = 0; i < items; ++i)
1552 				mci_writel(host, DATA(host->data_offset),
1553 						aligned_buf[i]);
1554 		}
1555 	} else
1556 #endif
1557 	{
1558 		u32 *pdata = buf;
1559 		for (; cnt >= 4; cnt -= 4)
1560 			mci_writel(host, DATA(host->data_offset), *pdata++);
1561 		buf = pdata;
1562 	}
1563 	/* put anything remaining in the part_buf */
1564 	if (cnt) {
1565 		dw_mci_set_part_bytes(host, buf, cnt);
1566 		 /* Push data if we have reached the expected data length */
1567 		if ((data->bytes_xfered + init_cnt) ==
1568 		    (data->blksz * data->blocks))
1569 			mci_writel(host, DATA(host->data_offset),
1570 				   host->part_buf32);
1571 	}
1572 }
1573 
1574 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1575 {
1576 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1577 	if (unlikely((unsigned long)buf & 0x3)) {
1578 		while (cnt >= 4) {
1579 			/* pull data from fifo into aligned buffer */
1580 			u32 aligned_buf[32];
1581 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1582 			int items = len >> 2;
1583 			int i;
1584 			for (i = 0; i < items; ++i)
1585 				aligned_buf[i] = mci_readl(host,
1586 						DATA(host->data_offset));
1587 			/* memcpy from aligned buffer into output buffer */
1588 			memcpy(buf, aligned_buf, len);
1589 			buf += len;
1590 			cnt -= len;
1591 		}
1592 	} else
1593 #endif
1594 	{
1595 		u32 *pdata = buf;
1596 		for (; cnt >= 4; cnt -= 4)
1597 			*pdata++ = mci_readl(host, DATA(host->data_offset));
1598 		buf = pdata;
1599 	}
1600 	if (cnt) {
1601 		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1602 		dw_mci_pull_final_bytes(host, buf, cnt);
1603 	}
1604 }
1605 
1606 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1607 {
1608 	struct mmc_data *data = host->data;
1609 	int init_cnt = cnt;
1610 
1611 	/* try and push anything in the part_buf */
1612 	if (unlikely(host->part_buf_count)) {
1613 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1614 		buf += len;
1615 		cnt -= len;
1616 
1617 		if (host->part_buf_count == 8) {
1618 			mci_writeq(host, DATA(host->data_offset),
1619 					host->part_buf);
1620 			host->part_buf_count = 0;
1621 		}
1622 	}
1623 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1624 	if (unlikely((unsigned long)buf & 0x7)) {
1625 		while (cnt >= 8) {
1626 			u64 aligned_buf[16];
1627 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1628 			int items = len >> 3;
1629 			int i;
1630 			/* memcpy from input buffer into aligned buffer */
1631 			memcpy(aligned_buf, buf, len);
1632 			buf += len;
1633 			cnt -= len;
1634 			/* push data from aligned buffer into fifo */
1635 			for (i = 0; i < items; ++i)
1636 				mci_writeq(host, DATA(host->data_offset),
1637 						aligned_buf[i]);
1638 		}
1639 	} else
1640 #endif
1641 	{
1642 		u64 *pdata = buf;
1643 		for (; cnt >= 8; cnt -= 8)
1644 			mci_writeq(host, DATA(host->data_offset), *pdata++);
1645 		buf = pdata;
1646 	}
1647 	/* put anything remaining in the part_buf */
1648 	if (cnt) {
1649 		dw_mci_set_part_bytes(host, buf, cnt);
1650 		/* Push data if we have reached the expected data length */
1651 		if ((data->bytes_xfered + init_cnt) ==
1652 		    (data->blksz * data->blocks))
1653 			mci_writeq(host, DATA(host->data_offset),
1654 				   host->part_buf);
1655 	}
1656 }
1657 
1658 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1659 {
1660 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1661 	if (unlikely((unsigned long)buf & 0x7)) {
1662 		while (cnt >= 8) {
1663 			/* pull data from fifo into aligned buffer */
1664 			u64 aligned_buf[16];
1665 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1666 			int items = len >> 3;
1667 			int i;
1668 			for (i = 0; i < items; ++i)
1669 				aligned_buf[i] = mci_readq(host,
1670 						DATA(host->data_offset));
1671 			/* memcpy from aligned buffer into output buffer */
1672 			memcpy(buf, aligned_buf, len);
1673 			buf += len;
1674 			cnt -= len;
1675 		}
1676 	} else
1677 #endif
1678 	{
1679 		u64 *pdata = buf;
1680 		for (; cnt >= 8; cnt -= 8)
1681 			*pdata++ = mci_readq(host, DATA(host->data_offset));
1682 		buf = pdata;
1683 	}
1684 	if (cnt) {
1685 		host->part_buf = mci_readq(host, DATA(host->data_offset));
1686 		dw_mci_pull_final_bytes(host, buf, cnt);
1687 	}
1688 }
1689 
1690 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1691 {
1692 	int len;
1693 
1694 	/* get remaining partial bytes */
1695 	len = dw_mci_pull_part_bytes(host, buf, cnt);
1696 	if (unlikely(len == cnt))
1697 		return;
1698 	buf += len;
1699 	cnt -= len;
1700 
1701 	/* get the rest of the data */
1702 	host->pull_data(host, buf, cnt);
1703 }
1704 
1705 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1706 {
1707 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1708 	void *buf;
1709 	unsigned int offset;
1710 	struct mmc_data	*data = host->data;
1711 	int shift = host->data_shift;
1712 	u32 status;
1713 	unsigned int len;
1714 	unsigned int remain, fcnt;
1715 
1716 	do {
1717 		if (!sg_miter_next(sg_miter))
1718 			goto done;
1719 
1720 		host->sg = sg_miter->piter.sg;
1721 		buf = sg_miter->addr;
1722 		remain = sg_miter->length;
1723 		offset = 0;
1724 
1725 		do {
1726 			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1727 					<< shift) + host->part_buf_count;
1728 			len = min(remain, fcnt);
1729 			if (!len)
1730 				break;
1731 			dw_mci_pull_data(host, (void *)(buf + offset), len);
1732 			data->bytes_xfered += len;
1733 			offset += len;
1734 			remain -= len;
1735 		} while (remain);
1736 
1737 		sg_miter->consumed = offset;
1738 		status = mci_readl(host, MINTSTS);
1739 		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1740 	/* if the RXDR is ready, read again */
1741 	} while ((status & SDMMC_INT_RXDR) ||
1742 		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1743 
1744 	if (!remain) {
1745 		if (!sg_miter_next(sg_miter))
1746 			goto done;
1747 		sg_miter->consumed = 0;
1748 	}
1749 	sg_miter_stop(sg_miter);
1750 	return;
1751 
1752 done:
1753 	sg_miter_stop(sg_miter);
1754 	host->sg = NULL;
1755 	smp_wmb();
1756 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1757 }
1758 
1759 static void dw_mci_write_data_pio(struct dw_mci *host)
1760 {
1761 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1762 	void *buf;
1763 	unsigned int offset;
1764 	struct mmc_data	*data = host->data;
1765 	int shift = host->data_shift;
1766 	u32 status;
1767 	unsigned int len;
1768 	unsigned int fifo_depth = host->fifo_depth;
1769 	unsigned int remain, fcnt;
1770 
1771 	do {
1772 		if (!sg_miter_next(sg_miter))
1773 			goto done;
1774 
1775 		host->sg = sg_miter->piter.sg;
1776 		buf = sg_miter->addr;
1777 		remain = sg_miter->length;
1778 		offset = 0;
1779 
1780 		do {
1781 			fcnt = ((fifo_depth -
1782 				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1783 					<< shift) - host->part_buf_count;
1784 			len = min(remain, fcnt);
1785 			if (!len)
1786 				break;
1787 			host->push_data(host, (void *)(buf + offset), len);
1788 			data->bytes_xfered += len;
1789 			offset += len;
1790 			remain -= len;
1791 		} while (remain);
1792 
1793 		sg_miter->consumed = offset;
1794 		status = mci_readl(host, MINTSTS);
1795 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1796 	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1797 
1798 	if (!remain) {
1799 		if (!sg_miter_next(sg_miter))
1800 			goto done;
1801 		sg_miter->consumed = 0;
1802 	}
1803 	sg_miter_stop(sg_miter);
1804 	return;
1805 
1806 done:
1807 	sg_miter_stop(sg_miter);
1808 	host->sg = NULL;
1809 	smp_wmb();
1810 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1811 }
1812 
1813 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1814 {
1815 	if (!host->cmd_status)
1816 		host->cmd_status = status;
1817 
1818 	smp_wmb();
1819 
1820 	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1821 	tasklet_schedule(&host->tasklet);
1822 }
1823 
1824 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1825 {
1826 	struct dw_mci *host = dev_id;
1827 	u32 pending;
1828 	int i;
1829 
1830 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1831 
1832 	/*
1833 	 * DTO fix - version 2.10a and below, and only if internal DMA
1834 	 * is configured.
1835 	 */
1836 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1837 		if (!pending &&
1838 		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1839 			pending |= SDMMC_INT_DATA_OVER;
1840 	}
1841 
1842 	if (pending) {
1843 		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1844 			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1845 			host->cmd_status = pending;
1846 			smp_wmb();
1847 			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1848 		}
1849 
1850 		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1851 			/* if there is an error, report DATA_ERROR */
1852 			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1853 			host->data_status = pending;
1854 			smp_wmb();
1855 			set_bit(EVENT_DATA_ERROR, &host->pending_events);
1856 			tasklet_schedule(&host->tasklet);
1857 		}
1858 
1859 		if (pending & SDMMC_INT_DATA_OVER) {
1860 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1861 			if (!host->data_status)
1862 				host->data_status = pending;
1863 			smp_wmb();
1864 			if (host->dir_status == DW_MCI_RECV_STATUS) {
1865 				if (host->sg != NULL)
1866 					dw_mci_read_data_pio(host, true);
1867 			}
1868 			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1869 			tasklet_schedule(&host->tasklet);
1870 		}
1871 
1872 		if (pending & SDMMC_INT_RXDR) {
1873 			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1874 			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1875 				dw_mci_read_data_pio(host, false);
1876 		}
1877 
1878 		if (pending & SDMMC_INT_TXDR) {
1879 			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1880 			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1881 				dw_mci_write_data_pio(host);
1882 		}
1883 
1884 		if (pending & SDMMC_INT_CMD_DONE) {
1885 			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1886 			dw_mci_cmd_interrupt(host, pending);
1887 		}
1888 
1889 		if (pending & SDMMC_INT_CD) {
1890 			mci_writel(host, RINTSTS, SDMMC_INT_CD);
1891 			queue_work(host->card_workqueue, &host->card_work);
1892 		}
1893 
1894 		/* Handle SDIO Interrupts */
1895 		for (i = 0; i < host->num_slots; i++) {
1896 			struct dw_mci_slot *slot = host->slot[i];
1897 			if (pending & SDMMC_INT_SDIO(i)) {
1898 				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1899 				mmc_signal_sdio_irq(slot->mmc);
1900 			}
1901 		}
1902 
1903 	}
1904 
1905 #ifdef CONFIG_MMC_DW_IDMAC
1906 	/* Handle DMA interrupts */
1907 	pending = mci_readl(host, IDSTS);
1908 	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1909 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1910 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1911 		host->dma_ops->complete(host);
1912 	}
1913 #endif
1914 
1915 	return IRQ_HANDLED;
1916 }
1917 
1918 static void dw_mci_work_routine_card(struct work_struct *work)
1919 {
1920 	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1921 	int i;
1922 
1923 	for (i = 0; i < host->num_slots; i++) {
1924 		struct dw_mci_slot *slot = host->slot[i];
1925 		struct mmc_host *mmc = slot->mmc;
1926 		struct mmc_request *mrq;
1927 		int present;
1928 
1929 		present = dw_mci_get_cd(mmc);
1930 		while (present != slot->last_detect_state) {
1931 			dev_dbg(&slot->mmc->class_dev, "card %s\n",
1932 				present ? "inserted" : "removed");
1933 
1934 			spin_lock_bh(&host->lock);
1935 
1936 			/* Card change detected */
1937 			slot->last_detect_state = present;
1938 
1939 			/* Clean up queue if present */
1940 			mrq = slot->mrq;
1941 			if (mrq) {
1942 				if (mrq == host->mrq) {
1943 					host->data = NULL;
1944 					host->cmd = NULL;
1945 
1946 					switch (host->state) {
1947 					case STATE_IDLE:
1948 						break;
1949 					case STATE_SENDING_CMD:
1950 						mrq->cmd->error = -ENOMEDIUM;
1951 						if (!mrq->data)
1952 							break;
1953 						/* fall through */
1954 					case STATE_SENDING_DATA:
1955 						mrq->data->error = -ENOMEDIUM;
1956 						dw_mci_stop_dma(host);
1957 						break;
1958 					case STATE_DATA_BUSY:
1959 					case STATE_DATA_ERROR:
1960 						if (mrq->data->error == -EINPROGRESS)
1961 							mrq->data->error = -ENOMEDIUM;
1962 						/* fall through */
1963 					case STATE_SENDING_STOP:
1964 						if (mrq->stop)
1965 							mrq->stop->error = -ENOMEDIUM;
1966 						break;
1967 					}
1968 
1969 					dw_mci_request_end(host, mrq);
1970 				} else {
1971 					list_del(&slot->queue_node);
1972 					mrq->cmd->error = -ENOMEDIUM;
1973 					if (mrq->data)
1974 						mrq->data->error = -ENOMEDIUM;
1975 					if (mrq->stop)
1976 						mrq->stop->error = -ENOMEDIUM;
1977 
1978 					spin_unlock(&host->lock);
1979 					mmc_request_done(slot->mmc, mrq);
1980 					spin_lock(&host->lock);
1981 				}
1982 			}
1983 
1984 			/* Power down slot */
1985 			if (present == 0) {
1986 				/* Clear down the FIFO */
1987 				dw_mci_fifo_reset(host);
1988 #ifdef CONFIG_MMC_DW_IDMAC
1989 				dw_mci_idmac_reset(host);
1990 #endif
1991 
1992 			}
1993 
1994 			spin_unlock_bh(&host->lock);
1995 
1996 			present = dw_mci_get_cd(mmc);
1997 		}
1998 
1999 		mmc_detect_change(slot->mmc,
2000 			msecs_to_jiffies(host->pdata->detect_delay_ms));
2001 	}
2002 }
2003 
2004 #ifdef CONFIG_OF
2005 /* given a slot id, find out the device node representing that slot */
2006 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2007 {
2008 	struct device_node *np;
2009 	const __be32 *addr;
2010 	int len;
2011 
2012 	if (!dev || !dev->of_node)
2013 		return NULL;
2014 
2015 	for_each_child_of_node(dev->of_node, np) {
2016 		addr = of_get_property(np, "reg", &len);
2017 		if (!addr || (len < sizeof(int)))
2018 			continue;
2019 		if (be32_to_cpup(addr) == slot)
2020 			return np;
2021 	}
2022 	return NULL;
2023 }
2024 
2025 static struct dw_mci_of_slot_quirks {
2026 	char *quirk;
2027 	int id;
2028 } of_slot_quirks[] = {
2029 	{
2030 		.quirk	= "disable-wp",
2031 		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2032 	},
2033 };
2034 
2035 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2036 {
2037 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2038 	int quirks = 0;
2039 	int idx;
2040 
2041 	/* get quirks */
2042 	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2043 		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2044 			quirks |= of_slot_quirks[idx].id;
2045 
2046 	return quirks;
2047 }
2048 
2049 /* find out bus-width for a given slot */
2050 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2051 {
2052 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2053 	u32 bus_wd = 1;
2054 
2055 	if (!np)
2056 		return 1;
2057 
2058 	if (of_property_read_u32(np, "bus-width", &bus_wd))
2059 		dev_err(dev,
2060 			"bus-width property not found, assuming width as 1\n");
2061 	return bus_wd;
2062 }
2063 
2064 /* find the write protect gpio for a given slot; -EINVAL if none specified */
2065 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2066 {
2067 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2068 	int gpio;
2069 
2070 	if (!np)
2071 		return -EINVAL;
2072 
2073 	gpio = of_get_named_gpio(np, "wp-gpios", 0);
2074 
2075 	/* Having a missing entry is valid; return silently */
2076 	if (!gpio_is_valid(gpio))
2077 		return -EINVAL;
2078 
2079 	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2080 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2081 		return -EINVAL;
2082 	}
2083 
2084 	return gpio;
2085 }
2086 
2087 /* find the cd gpio for a given slot */
2088 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2089 					struct mmc_host *mmc)
2090 {
2091 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2092 	int gpio;
2093 
2094 	if (!np)
2095 		return;
2096 
2097 	gpio = of_get_named_gpio(np, "cd-gpios", 0);
2098 
2099 	/* Having a missing entry is valid; return silently */
2100 	if (!gpio_is_valid(gpio))
2101 		return;
2102 
2103 	if (mmc_gpio_request_cd(mmc, gpio, 0))
2104 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
2105 }
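
/*
 * Illustrative only (hypothetical board devicetree): the helpers above
 * parse per-slot child nodes of the controller node, e.g.
 *
 *	mshc@12200000 {
 *		num-slots = <1>;
 *		slot@0 {
 *			reg = <0>;
 *			bus-width = <4>;
 *			disable-wp;
 *			cd-gpios = <&gpk0 2 0>;
 *		};
 *	};
 *
 * "reg" is the slot id matched by dw_mci_of_find_slot_node(); the other
 * properties feed the quirk, bus-width and gpio lookups above. The node
 * name, address and gpio phandle are placeholders.
 */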
2106 #else /* CONFIG_OF */
2107 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2108 {
2109 	return 0;
2110 }
2111 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2112 {
2113 	return 1;
2114 }
2115 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2116 {
2117 	return NULL;
2118 }
2119 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2120 {
2121 	return -EINVAL;
2122 }
2123 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2124 					struct mmc_host *mmc)
2125 {
2126 	return;
2127 }
2128 #endif /* CONFIG_OF */
2129 
2130 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2131 {
2132 	struct mmc_host *mmc;
2133 	struct dw_mci_slot *slot;
2134 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2135 	int ctrl_id, ret;
2136 	u32 freq[2];
2137 	u8 bus_width;
2138 
2139 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2140 	if (!mmc)
2141 		return -ENOMEM;
2142 
2143 	slot = mmc_priv(mmc);
2144 	slot->id = id;
2145 	slot->mmc = mmc;
2146 	slot->host = host;
2147 	host->slot[id] = slot;
2148 
2149 	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2150 
2151 	mmc->ops = &dw_mci_ops;
2152 	if (of_property_read_u32_array(host->dev->of_node,
2153 				       "clock-freq-min-max", freq, 2)) {
2154 		mmc->f_min = DW_MCI_FREQ_MIN;
2155 		mmc->f_max = DW_MCI_FREQ_MAX;
2156 	} else {
2157 		mmc->f_min = freq[0];
2158 		mmc->f_max = freq[1];
2159 	}
2160 
2161 	if (host->pdata->get_ocr)
2162 		mmc->ocr_avail = host->pdata->get_ocr(id);
2163 	else
2164 		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2165 
2166 	/*
2167 	 * Start with slot power disabled, it will be enabled when a card
2168 	 * is detected.
2169 	 */
2170 	if (host->pdata->setpower)
2171 		host->pdata->setpower(id, 0);
2172 
2173 	if (host->pdata->caps)
2174 		mmc->caps = host->pdata->caps;
2175 
2176 	if (host->pdata->pm_caps)
2177 		mmc->pm_caps = host->pdata->pm_caps;
2178 
2179 	if (host->dev->of_node) {
2180 		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2181 		if (ctrl_id < 0)
2182 			ctrl_id = 0;
2183 	} else {
2184 		ctrl_id = to_platform_device(host->dev)->id;
2185 	}
2186 	if (drv_data && drv_data->caps)
2187 		mmc->caps |= drv_data->caps[ctrl_id];
2188 
2189 	if (host->pdata->caps2)
2190 		mmc->caps2 = host->pdata->caps2;
2191 
2192 	if (host->pdata->get_bus_wd)
2193 		bus_width = host->pdata->get_bus_wd(slot->id);
2194 	else if (host->dev->of_node)
2195 		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2196 	else
2197 		bus_width = 1;
2198 
2199 	switch (bus_width) {
2200 	case 8:
2201 		mmc->caps |= MMC_CAP_8_BIT_DATA;
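		/* fall through */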
2202 	case 4:
2203 		mmc->caps |= MMC_CAP_4_BIT_DATA;
2204 	}
2205 
2206 	if (host->pdata->blk_settings) {
2207 		mmc->max_segs = host->pdata->blk_settings->max_segs;
2208 		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2209 		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2210 		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2211 		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2212 	} else {
2213 		/* Useful defaults if platform data is unset. */
2214 #ifdef CONFIG_MMC_DW_IDMAC
2215 		mmc->max_segs = host->ring_size;
2216 		mmc->max_blk_size = 65536;
2217 		mmc->max_blk_count = host->ring_size;
2218 		mmc->max_seg_size = 0x1000;
2219 		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2220 #else
2221 		mmc->max_segs = 64;
2222 		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2223 		mmc->max_blk_count = 512;
2224 		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2225 		mmc->max_seg_size = mmc->max_req_size;
2226 #endif /* CONFIG_MMC_DW_IDMAC */
2227 	}
2228 
2229 	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2230 	dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
2231 
2232 	ret = mmc_add_host(mmc);
2233 	if (ret)
2234 		goto err_setup_bus;
2235 
2236 #if defined(CONFIG_DEBUG_FS)
2237 	dw_mci_init_debugfs(slot);
2238 #endif
2239 
2240 	/* Card initially undetected */
2241 	slot->last_detect_state = 0;
2242 
2243 	return 0;
2244 
2245 err_setup_bus:
2246 	mmc_free_host(mmc);
2247 	return -EINVAL;
2248 }
2249 
2250 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2251 {
2252 	/* Shutdown detect IRQ */
2253 	if (slot->host->pdata->exit)
2254 		slot->host->pdata->exit(id);
2255 
2256 	/* Debugfs stuff is cleaned up by mmc core */
2257 	mmc_remove_host(slot->mmc);
2258 	slot->host->slot[id] = NULL;
2259 	mmc_free_host(slot->mmc);
2260 }
2261 
2262 static void dw_mci_init_dma(struct dw_mci *host)
2263 {
2264 	/* Alloc memory for sg translation */
2265 	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2266 					  &host->sg_dma, GFP_KERNEL);
2267 	if (!host->sg_cpu) {
2268 		dev_err(host->dev, "%s: could not alloc DMA memory\n",
2269 			__func__);
2270 		goto no_dma;
2271 	}
2272 
2273 	/* Determine which DMA interface to use */
2274 #ifdef CONFIG_MMC_DW_IDMAC
2275 	host->dma_ops = &dw_mci_idmac_ops;
2276 	dev_info(host->dev, "Using internal DMA controller.\n");
2277 #endif
2278 
2279 	if (!host->dma_ops)
2280 		goto no_dma;
2281 
2282 	if (host->dma_ops->init && host->dma_ops->start &&
2283 	    host->dma_ops->stop && host->dma_ops->cleanup) {
2284 		if (host->dma_ops->init(host)) {
2285 			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2286 				__func__);
2287 			goto no_dma;
2288 		}
2289 	} else {
2290 		dev_err(host->dev, "DMA initialization not found.\n");
2291 		goto no_dma;
2292 	}
2293 
2294 	host->use_dma = 1;
2295 	return;
2296 
2297 no_dma:
2298 	dev_info(host->dev, "Using PIO mode.\n");
2299 	host->use_dma = 0;
2300 	return;
2301 }
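
/*
 * A minimal sketch (not a definitive contract) of the dma_ops a glue
 * layer could hand in via pdata->dma_ops, matching the checks in
 * dw_mci_init_dma() above; the "foo_*" names are placeholders:
 *
 *	static const struct dw_mci_dma_ops foo_dma_ops = {
 *		.init		= foo_dma_init,
 *		.start		= foo_dma_start,
 *		.stop		= foo_dma_stop,
 *		.complete	= foo_dma_complete,
 *		.cleanup	= foo_dma_cleanup,
 *	};
 *
 * init/start/stop/cleanup are required by the check above; .exit is
 * optional and, when present, is called from the error and remove paths.
 */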
2302 
2303 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2304 {
2305 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2306 	u32 ctrl;
2307 
2308 	ctrl = mci_readl(host, CTRL);
2309 	ctrl |= reset;
2310 	mci_writel(host, CTRL, ctrl);
2311 
2312 	/* wait till resets clear */
2313 	do {
2314 		ctrl = mci_readl(host, CTRL);
2315 		if (!(ctrl & reset))
2316 			return true;
2317 	} while (time_before(jiffies, timeout));
2318 
2319 	dev_err(host->dev,
2320 		"Timeout resetting block (ctrl reset %#x)\n",
2321 		ctrl & reset);
2322 
2323 	return false;
2324 }
2325 
2326 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2327 {
2328 	/*
2329 	 * Resetting generates a block interrupt, so stop the
2330 	 * scatter-gather transfer and set the pointer to NULL first.
2331 	 */
2332 	if (host->sg) {
2333 		sg_miter_stop(&host->sg_miter);
2334 		host->sg = NULL;
2335 	}
2336 
2337 	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2338 }
2339 
2340 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2341 {
2342 	return dw_mci_ctrl_reset(host,
2343 				 SDMMC_CTRL_FIFO_RESET |
2344 				 SDMMC_CTRL_RESET |
2345 				 SDMMC_CTRL_DMA_RESET);
2346 }
2347 
2348 #ifdef CONFIG_OF
2349 static struct dw_mci_of_quirks {
2350 	char *quirk;
2351 	int id;
2352 } of_quirks[] = {
2353 	{
2354 		.quirk	= "broken-cd",
2355 		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2356 	},
2357 };
2358 
2359 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2360 {
2361 	struct dw_mci_board *pdata;
2362 	struct device *dev = host->dev;
2363 	struct device_node *np = dev->of_node;
2364 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2365 	int idx, ret;
2366 	u32 clock_frequency;
2367 
2368 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2369 	if (!pdata) {
2370 		dev_err(dev, "could not allocate memory for pdata\n");
2371 		return ERR_PTR(-ENOMEM);
2372 	}
2373 
2374 	/* find out number of slots supported */
2375 	if (of_property_read_u32(dev->of_node, "num-slots",
2376 				&pdata->num_slots)) {
2377 		dev_info(dev,
2378 			 "num-slots property not found, assuming 1 slot is available\n");
2379 		pdata->num_slots = 1;
2380 	}
2381 
2382 	/* get quirks */
2383 	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2384 		if (of_get_property(np, of_quirks[idx].quirk, NULL))
2385 			pdata->quirks |= of_quirks[idx].id;
2386 
2387 	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2388 		dev_info(dev,
2389 			 "fifo-depth property not found, using value of FIFOTH register as default\n");
2390 
2391 	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2392 
2393 	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2394 		pdata->bus_hz = clock_frequency;
2395 
2396 	if (drv_data && drv_data->parse_dt) {
2397 		ret = drv_data->parse_dt(host);
2398 		if (ret)
2399 			return ERR_PTR(ret);
2400 	}
2401 
2402 	if (of_find_property(np, "keep-power-in-suspend", NULL))
2403 		pdata->pm_caps |= MMC_PM_KEEP_POWER;
2404 
2405 	if (of_find_property(np, "enable-sdio-wakeup", NULL))
2406 		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2407 
2408 	if (of_find_property(np, "supports-highspeed", NULL))
2409 		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2410 
2411 	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2412 		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2413 
2414 	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2415 		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2416 
2417 	if (of_get_property(np, "cd-inverted", NULL))
2418 		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2419 
2420 	return pdata;
2421 }
2422 
2423 #else /* CONFIG_OF */
2424 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2425 {
2426 	return ERR_PTR(-EINVAL);
2427 }
2428 #endif /* CONFIG_OF */
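
/*
 * Illustrative only: on non-DT platforms the glue driver supplies a
 * struct dw_mci_board (see <linux/mmc/dw_mmc.h>) as host->pdata rather
 * than relying on dw_mci_parse_dt(), e.g.
 *
 *	static struct dw_mci_board foo_mci_pdata = {
 *		.num_slots	 = 1,
 *		.bus_hz		 = 100 * 1000 * 1000,
 *		.detect_delay_ms = 200,
 *		.caps		 = MMC_CAP_4_BIT_DATA,
 *	};
 *
 * The name and values are hypothetical; the fields mirror the DT
 * properties parsed above.
 */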
2429 
2430 int dw_mci_probe(struct dw_mci *host)
2431 {
2432 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2433 	int width, i, ret = 0;
2434 	u32 fifo_size;
2435 	int init_slots = 0;
2436 
2437 	if (!host->pdata) {
2438 		host->pdata = dw_mci_parse_dt(host);
2439 		if (IS_ERR(host->pdata)) {
2440 			dev_err(host->dev, "platform data not available\n");
2441 			return -EINVAL;
2442 		}
2443 	}
2444 
2445 	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2446 		dev_err(host->dev,
2447 			"Platform data must supply select_slot function\n");
2448 		return -ENODEV;
2449 	}
2450 
2451 	host->biu_clk = devm_clk_get(host->dev, "biu");
2452 	if (IS_ERR(host->biu_clk)) {
2453 		dev_dbg(host->dev, "biu clock not available\n");
2454 	} else {
2455 		ret = clk_prepare_enable(host->biu_clk);
2456 		if (ret) {
2457 			dev_err(host->dev, "failed to enable biu clock\n");
2458 			return ret;
2459 		}
2460 	}
2461 
2462 	host->ciu_clk = devm_clk_get(host->dev, "ciu");
2463 	if (IS_ERR(host->ciu_clk)) {
2464 		dev_dbg(host->dev, "ciu clock not available\n");
2465 		host->bus_hz = host->pdata->bus_hz;
2466 	} else {
2467 		ret = clk_prepare_enable(host->ciu_clk);
2468 		if (ret) {
2469 			dev_err(host->dev, "failed to enable ciu clock\n");
2470 			goto err_clk_biu;
2471 		}
2472 
2473 		if (host->pdata->bus_hz) {
2474 			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2475 			if (ret)
2476 				dev_warn(host->dev,
2477 					 "Unable to set bus rate to %ul\n",
2478 					 host->pdata->bus_hz);
2479 		}
2480 		host->bus_hz = clk_get_rate(host->ciu_clk);
2481 	}
2482 
2483 	if (drv_data && drv_data->init) {
2484 		ret = drv_data->init(host);
2485 		if (ret) {
2486 			dev_err(host->dev,
2487 				"implementation specific init failed\n");
2488 			goto err_clk_ciu;
2489 		}
2490 	}
2491 
2492 	if (drv_data && drv_data->setup_clock) {
2493 		ret = drv_data->setup_clock(host);
2494 		if (ret) {
2495 			dev_err(host->dev,
2496 				"implementation specific clock setup failed\n");
2497 			goto err_clk_ciu;
2498 		}
2499 	}
2500 
2501 	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2502 	if (IS_ERR(host->vmmc)) {
2503 		ret = PTR_ERR(host->vmmc);
2504 		if (ret == -EPROBE_DEFER)
2505 			goto err_clk_ciu;
2506 
2507 		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2508 		host->vmmc = NULL;
2509 	} else {
2510 		ret = regulator_enable(host->vmmc);
2511 		if (ret) {
2512 			if (ret != -EPROBE_DEFER)
2513 				dev_err(host->dev,
2514 					"regulator_enable fail: %d\n", ret);
2515 			goto err_clk_ciu;
2516 		}
2517 	}
2518 
2519 	if (!host->bus_hz) {
2520 		dev_err(host->dev,
2521 			"Platform data must supply bus speed\n");
2522 		ret = -ENODEV;
2523 		goto err_regulator;
2524 	}
2525 
2526 	host->quirks = host->pdata->quirks;
2527 
2528 	spin_lock_init(&host->lock);
2529 	INIT_LIST_HEAD(&host->queue);
2530 
2531 	/*
2532 	 * Get the host data width from HCON[9:7] (0 = 16-bit, 1 = 32-bit,
2533 	 * 2 = 64-bit) - this assumes that HCON has been set correctly.
2534 	 */
2535 	i = (mci_readl(host, HCON) >> 7) & 0x7;
2536 	if (!i) {
2537 		host->push_data = dw_mci_push_data16;
2538 		host->pull_data = dw_mci_pull_data16;
2539 		width = 16;
2540 		host->data_shift = 1;
2541 	} else if (i == 2) {
2542 		host->push_data = dw_mci_push_data64;
2543 		host->pull_data = dw_mci_pull_data64;
2544 		width = 64;
2545 		host->data_shift = 3;
2546 	} else {
2547 		/* Check for a reserved value, and warn if it is */
2548 		WARN((i != 1),
2549 		     "HCON reports a reserved host data width!\n"
2550 		     "Defaulting to 32-bit access.\n");
2551 		host->push_data = dw_mci_push_data32;
2552 		host->pull_data = dw_mci_pull_data32;
2553 		width = 32;
2554 		host->data_shift = 2;
2555 	}
2556 
2557 	/* Reset all blocks */
2558 	if (!dw_mci_ctrl_all_reset(host))
2559 		return -ENODEV;
2560 
2561 	host->dma_ops = host->pdata->dma_ops;
2562 	dw_mci_init_dma(host);
2563 
2564 	/* Clear the interrupts for the host controller */
2565 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2566 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2567 
2568 	/* Put in max timeout */
2569 	mci_writel(host, TMOUT, 0xFFFFFFFF);
2570 
2571 	/*
2572 	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
2573 	 * TX_WMark = fifo_size / 2, DMA multiple transaction size = 8.
2574 	 */
2575 	if (!host->pdata->fifo_depth) {
2576 		/*
2577 		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2578 		 * have been overwritten by the bootloader, just like we're
2579 		 * about to do, so if you know the value for your hardware, you
2580 		 * should put it in the platform data.
2581 		 */
2582 		fifo_size = mci_readl(host, FIFOTH);
2583 		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2584 	} else {
2585 		fifo_size = host->pdata->fifo_depth;
2586 	}
2587 	host->fifo_depth = fifo_size;
2588 	host->fifoth_val =
2589 		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2590 	mci_writel(host, FIFOTH, host->fifoth_val);
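	/*
	 * Worked example, assuming the databook FIFOTH layout of
	 * MSIZE[30:28], RX_WMark[27:16] and TX_WMark[11:0]: a 32-entry
	 * FIFO gives RX_WMark = 15, TX_WMark = 16 and MSIZE = 0x2, i.e.
	 * DMA multiple-transaction bursts of 8 transfers.
	 */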
2591 
2592 	/* disable clock to CIU */
2593 	mci_writel(host, CLKENA, 0);
2594 	mci_writel(host, CLKSRC, 0);
2595 
2596 	/*
2597 	 * The offset of the DATA register changed in the 2.40a spec, so
2598 	 * check the version ID and set the data offset accordingly.
2599 	 */
2600 	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2601 	dev_info(host->dev, "Version ID is %04x\n", host->verid);
2602 
2603 	if (host->verid < DW_MMC_240A)
2604 		host->data_offset = DATA_OFFSET;
2605 	else
2606 		host->data_offset = DATA_240A_OFFSET;
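	/*
	 * DATA_OFFSET and DATA_240A_OFFSET are defined in dw_mmc.h; from
	 * the 2.40a IP release the DATA register was moved so that newer
	 * registers could occupy its old location.
	 */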
2607 
2608 	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2609 	host->card_workqueue = alloc_workqueue("dw-mci-card",
2610 			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2611 	if (!host->card_workqueue) {
2612 		ret = -ENOMEM;
2613 		goto err_dmaunmap;
2614 	}
2615 	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2616 	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2617 			       host->irq_flags, "dw-mci", host);
2618 	if (ret)
2619 		goto err_workqueue;
2620 
2621 	if (host->pdata->num_slots)
2622 		host->num_slots = host->pdata->num_slots;
2623 	else
2624 		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2625 
2626 	/*
2627 	 * Enable interrupts for command done, data over, data empty, card
2628 	 * detect, receive ready, and errors (transmit/receive timeout, CRC).
2629 	 */
2630 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2631 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2632 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2633 		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2634 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2635 
2636 	dev_info(host->dev,
2637 		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
2638 		 host->irq, width, fifo_size);
2640 
2641 	/* We need at least one slot to succeed */
2642 	for (i = 0; i < host->num_slots; i++) {
2643 		ret = dw_mci_init_slot(host, i);
2644 		if (ret)
2645 			dev_dbg(host->dev, "slot %d init failed\n", i);
2646 		else
2647 			init_slots++;
2648 	}
2649 
2650 	if (init_slots) {
2651 		dev_info(host->dev, "%d slots initialized\n", init_slots);
2652 	} else {
2653 		dev_dbg(host->dev, "attempted to initialize %d slots, but failed on all\n",
2654 			host->num_slots);
2655 		goto err_workqueue;
2656 	}
2657 
2658 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2659 		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2660 
2661 	return 0;
2662 
2663 err_workqueue:
2664 	destroy_workqueue(host->card_workqueue);
2665 
2666 err_dmaunmap:
2667 	if (host->use_dma && host->dma_ops->exit)
2668 		host->dma_ops->exit(host);
2669 
2670 err_regulator:
2671 	if (host->vmmc)
2672 		regulator_disable(host->vmmc);
2673 
2674 err_clk_ciu:
2675 	if (!IS_ERR(host->ciu_clk))
2676 		clk_disable_unprepare(host->ciu_clk);
2677 
2678 err_clk_biu:
2679 	if (!IS_ERR(host->biu_clk))
2680 		clk_disable_unprepare(host->biu_clk);
2681 
2682 	return ret;
2683 }
2684 EXPORT_SYMBOL(dw_mci_probe);
2685 
2686 void dw_mci_remove(struct dw_mci *host)
2687 {
2688 	int i;
2689 
2690 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2691 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2692 
2693 	for (i = 0; i < host->num_slots; i++) {
2694 		dev_dbg(host->dev, "remove slot %d\n", i);
2695 		if (host->slot[i])
2696 			dw_mci_cleanup_slot(host->slot[i], i);
2697 	}
2698 
2699 	/* disable clock to CIU */
2700 	mci_writel(host, CLKENA, 0);
2701 	mci_writel(host, CLKSRC, 0);
2702 
2703 	destroy_workqueue(host->card_workqueue);
2704 
2705 	if (host->use_dma && host->dma_ops->exit)
2706 		host->dma_ops->exit(host);
2707 
2708 	if (host->vmmc)
2709 		regulator_disable(host->vmmc);
2710 
2711 	if (!IS_ERR(host->ciu_clk))
2712 		clk_disable_unprepare(host->ciu_clk);
2713 
2714 	if (!IS_ERR(host->biu_clk))
2715 		clk_disable_unprepare(host->biu_clk);
2716 }
2717 EXPORT_SYMBOL(dw_mci_remove);
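
/*
 * A minimal sketch of how platform glue (e.g. dw_mmc-pltfm.c) is
 * expected to drive the exported probe/remove pair; error handling is
 * elided and the "foo_*" names are hypothetical:
 *
 *	static int foo_dw_mci_probe(struct platform_device *pdev)
 *	{
 *		struct dw_mci *host;
 *		struct resource *regs;
 *
 *		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 *		if (!host)
 *			return -ENOMEM;
 *		host->dev = &pdev->dev;
 *		host->irq = platform_get_irq(pdev, 0);
 *		host->irq_flags = 0;
 *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->regs = devm_ioremap_resource(&pdev->dev, regs);
 *		if (IS_ERR(host->regs))
 *			return PTR_ERR(host->regs);
 *		platform_set_drvdata(pdev, host);
 *		return dw_mci_probe(host);
 *	}
 */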
2718 
2719 
2720 
2721 #ifdef CONFIG_PM_SLEEP
2722 /*
2723  * TODO: we should probably disable the clock to the card in the suspend path.
2724  */
2725 int dw_mci_suspend(struct dw_mci *host)
2726 {
2727 	if (host->vmmc)
2728 		regulator_disable(host->vmmc);
2729 
2730 	return 0;
2731 }
2732 EXPORT_SYMBOL(dw_mci_suspend);
2733 
2734 int dw_mci_resume(struct dw_mci *host)
2735 {
2736 	int i, ret;
2737 
2738 	if (host->vmmc) {
2739 		ret = regulator_enable(host->vmmc);
2740 		if (ret) {
2741 			dev_err(host->dev,
2742 				"failed to enable regulator: %d\n", ret);
2743 			return ret;
2744 		}
2745 	}
2746 
2747 	if (!dw_mci_ctrl_all_reset(host))
2748 		return -ENODEV;
2751 
2752 	if (host->use_dma && host->dma_ops->init)
2753 		host->dma_ops->init(host);
2754 
2755 	/*
2756 	 * Restore the initial value of the FIFOTH register,
2757 	 * and invalidate prev_blksz by zeroing it.
2758 	 */
2759 	mci_writel(host, FIFOTH, host->fifoth_val);
2760 	host->prev_blksz = 0;
2761 
2762 	/* Put in max timeout */
2763 	mci_writel(host, TMOUT, 0xFFFFFFFF);
2764 
2765 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2766 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2767 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2768 		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2769 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2770 
2771 	for (i = 0; i < host->num_slots; i++) {
2772 		struct dw_mci_slot *slot = host->slot[i];
2773 		if (!slot)
2774 			continue;
2775 		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2776 			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2777 			dw_mci_setup_bus(slot, true);
2778 		}
2779 	}
2780 	return 0;
2781 }
2782 EXPORT_SYMBOL(dw_mci_resume);
2783 #endif /* CONFIG_PM_SLEEP */
2784 
2785 static int __init dw_mci_init(void)
2786 {
2787 	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
2788 	return 0;
2789 }
2790 
2791 static void __exit dw_mci_exit(void)
2792 {
2793 }
2794 
2795 module_init(dw_mci_init);
2796 module_exit(dw_mci_exit);
2797 
2798 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2799 MODULE_AUTHOR("NXP Semiconductor VietNam");
2800 MODULE_AUTHOR("Imagination Technologies Ltd");
2801 MODULE_LICENSE("GPL v2");
2802