1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13 
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/dw_mmc.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/workqueue.h>
37 #include <linux/of.h>
38 #include <linux/of_gpio.h>
39 #include <linux/mmc/slot-gpio.h>
40 
41 #include "dw_mmc.h"
42 
43 /* Common flag combinations */
44 #define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
45 				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
46 				 SDMMC_INT_EBE)
47 #define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 				 SDMMC_INT_RESP_ERR)
49 #define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
50 				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
51 #define DW_MCI_SEND_STATUS	1
52 #define DW_MCI_RECV_STATUS	2
53 #define DW_MCI_DMA_THRESHOLD	16
54 
55 #define DW_MCI_FREQ_MAX	200000000	/* unit: Hz */
56 #define DW_MCI_FREQ_MIN	400000		/* unit: Hz */
57 
58 #ifdef CONFIG_MMC_DW_IDMAC
59 #define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 				 SDMMC_IDMAC_INT_TI)
63 
64 struct idmac_desc {
65 	u32		des0;	/* Control Descriptor */
66 #define IDMAC_DES0_DIC	BIT(1)
67 #define IDMAC_DES0_LD	BIT(2)
68 #define IDMAC_DES0_FD	BIT(3)
69 #define IDMAC_DES0_CH	BIT(4)
70 #define IDMAC_DES0_ER	BIT(5)
71 #define IDMAC_DES0_CES	BIT(30)
72 #define IDMAC_DES0_OWN	BIT(31)
73 
74 	u32		des1;	/* Buffer sizes */
75 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
76 	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
77 
78 	u32		des2;	/* buffer 1 physical address */
79 
80 	u32		des3;	/* buffer 2 physical address */
81 };
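
/*
 * Editor's note on the descriptor layout: in chained mode (IDMAC_DES0_CH)
 * des3 points at the next descriptor instead of a second data buffer, so
 * only buffer 1 is used; its size lives in des1 bits 12:0 (8191 bytes max,
 * hence the 0x1fff mask above).  Setting IDMAC_DES0_OWN hands the
 * descriptor to the IDMAC, which clears it again on completion.
 */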
82 #endif /* CONFIG_MMC_DW_IDMAC */
83 
84 static const u8 tuning_blk_pattern_4bit[] = {
85 	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
93 };
94 
95 static const u8 tuning_blk_pattern_8bit[] = {
96 	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
112 };
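
/*
 * Editor's note: these are the standard tuning blocks from the SD and
 * eMMC specifications.  CMD19 (SD, 4-bit) and CMD21 (eMMC HS200, 4- or
 * 8-bit) make the card send this known pattern back so the host can sweep
 * its sample phase and pick a window where the pattern reads back clean.
 */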
113 
114 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116 
117 #if defined(CONFIG_DEBUG_FS)
118 static int dw_mci_req_show(struct seq_file *s, void *v)
119 {
120 	struct dw_mci_slot *slot = s->private;
121 	struct mmc_request *mrq;
122 	struct mmc_command *cmd;
123 	struct mmc_command *stop;
124 	struct mmc_data	*data;
125 
126 	/* Make sure we get a consistent snapshot */
127 	spin_lock_bh(&slot->host->lock);
128 	mrq = slot->mrq;
129 
130 	if (mrq) {
131 		cmd = mrq->cmd;
132 		data = mrq->data;
133 		stop = mrq->stop;
134 
135 		if (cmd)
136 			seq_printf(s,
137 				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 				   cmd->opcode, cmd->arg, cmd->flags,
139 				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 				   cmd->resp[3], cmd->error);
141 		if (data)
142 			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 				   data->bytes_xfered, data->blocks,
144 				   data->blksz, data->flags, data->error);
145 		if (stop)
146 			seq_printf(s,
147 				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 				   stop->opcode, stop->arg, stop->flags,
149 				   stop->resp[0], stop->resp[1], stop->resp[2],
150 				   stop->resp[3], stop->error);
151 	}
152 
153 	spin_unlock_bh(&slot->host->lock);
154 
155 	return 0;
156 }
157 
158 static int dw_mci_req_open(struct inode *inode, struct file *file)
159 {
160 	return single_open(file, dw_mci_req_show, inode->i_private);
161 }
162 
163 static const struct file_operations dw_mci_req_fops = {
164 	.owner		= THIS_MODULE,
165 	.open		= dw_mci_req_open,
166 	.read		= seq_read,
167 	.llseek		= seq_lseek,
168 	.release	= single_release,
169 };
170 
171 static int dw_mci_regs_show(struct seq_file *s, void *v)
172 {
173 	struct dw_mci *host = s->private;
174 	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
175 	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
176 	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
177 	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
178 	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
179 	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
180 	return 0;
181 }
182 
183 static int dw_mci_regs_open(struct inode *inode, struct file *file)
184 {
185 	return single_open(file, dw_mci_regs_show, inode->i_private);
186 }
187 
188 static const struct file_operations dw_mci_regs_fops = {
189 	.owner		= THIS_MODULE,
190 	.open		= dw_mci_regs_open,
191 	.read		= seq_read,
192 	.llseek		= seq_lseek,
193 	.release	= single_release,
194 };
195 
196 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197 {
198 	struct mmc_host	*mmc = slot->mmc;
199 	struct dw_mci *host = slot->host;
200 	struct dentry *root;
201 	struct dentry *node;
202 
203 	root = mmc->debugfs_root;
204 	if (!root)
205 		return;
206 
207 	node = debugfs_create_file("regs", S_IRUSR, root, host,
208 				   &dw_mci_regs_fops);
209 	if (!node)
210 		goto err;
211 
212 	node = debugfs_create_file("req", S_IRUSR, root, slot,
213 				   &dw_mci_req_fops);
214 	if (!node)
215 		goto err;
216 
217 	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 	if (!node)
219 		goto err;
220 
221 	node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 				  (u32 *)&host->pending_events);
223 	if (!node)
224 		goto err;
225 
226 	node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 				  (u32 *)&host->completed_events);
228 	if (!node)
229 		goto err;
230 
231 	return;
232 
233 err:
234 	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235 }
236 #endif /* defined(CONFIG_DEBUG_FS) */
237 
238 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
239 {
240 	struct mmc_data	*data;
241 	struct dw_mci_slot *slot = mmc_priv(mmc);
242 	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
243 	u32 cmdr;
244 	cmd->error = -EINPROGRESS;
245 
246 	cmdr = cmd->opcode;
247 
248 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
249 	    cmd->opcode == MMC_GO_IDLE_STATE ||
250 	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
251 	    (cmd->opcode == SD_IO_RW_DIRECT &&
252 	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
253 		cmdr |= SDMMC_CMD_STOP;
254 	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
255 		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
256 
257 	if (cmd->flags & MMC_RSP_PRESENT) {
258 		/* We expect a response, so set this bit */
259 		cmdr |= SDMMC_CMD_RESP_EXP;
260 		if (cmd->flags & MMC_RSP_136)
261 			cmdr |= SDMMC_CMD_RESP_LONG;
262 	}
263 
264 	if (cmd->flags & MMC_RSP_CRC)
265 		cmdr |= SDMMC_CMD_RESP_CRC;
266 
267 	data = cmd->data;
268 	if (data) {
269 		cmdr |= SDMMC_CMD_DAT_EXP;
270 		if (data->flags & MMC_DATA_STREAM)
271 			cmdr |= SDMMC_CMD_STRM_MODE;
272 		if (data->flags & MMC_DATA_WRITE)
273 			cmdr |= SDMMC_CMD_DAT_WR;
274 	}
275 
276 	if (drv_data && drv_data->prepare_command)
277 		drv_data->prepare_command(slot->host, &cmdr);
278 
279 	return cmdr;
280 }
281 
282 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
283 {
284 	struct mmc_command *stop;
285 	u32 cmdr;
286 
287 	if (!cmd->data)
288 		return 0;
289 
290 	stop = &host->stop_abort;
291 	cmdr = cmd->opcode;
292 	memset(stop, 0, sizeof(struct mmc_command));
293 
294 	if (cmdr == MMC_READ_SINGLE_BLOCK ||
295 	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
296 	    cmdr == MMC_WRITE_BLOCK ||
297 	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
298 		stop->opcode = MMC_STOP_TRANSMISSION;
299 		stop->arg = 0;
300 		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
301 	} else if (cmdr == SD_IO_RW_EXTENDED) {
302 		stop->opcode = SD_IO_RW_DIRECT;
303 		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
304 			     ((cmd->arg >> 28) & 0x7);
305 		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
306 	} else {
307 		return 0;
308 	}
309 
310 	cmdr = stop->opcode | SDMMC_CMD_STOP |
311 		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
312 
313 	return cmdr;
314 }
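
/*
 * Editor's note on the CMD52 abort built above: bit 31 selects a write,
 * bits 30:28 are zero to address function 0 (the CCCR), SDIO_CCCR_ABORT
 * shifted by 9 places the register address in bits 25:9, and the low data
 * byte carries the number of the function to abort, recovered from bits
 * 30:28 of the original CMD53 argument.
 */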
315 
316 static void dw_mci_start_command(struct dw_mci *host,
317 				 struct mmc_command *cmd, u32 cmd_flags)
318 {
319 	host->cmd = cmd;
320 	dev_vdbg(host->dev,
321 		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
322 		 cmd->arg, cmd_flags);
323 
324 	mci_writel(host, CMDARG, cmd->arg);
325 	wmb();
326 
327 	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
328 }
329 
330 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
331 {
332 	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
333 	dw_mci_start_command(host, stop, host->stop_cmdr);
334 }
335 
336 /* DMA interface functions */
337 static void dw_mci_stop_dma(struct dw_mci *host)
338 {
339 	if (host->using_dma) {
340 		host->dma_ops->stop(host);
341 		host->dma_ops->cleanup(host);
342 	}
343 
344 	/* Data transfer was stopped by the interrupt handler */
345 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
346 }
347 
348 static int dw_mci_get_dma_dir(struct mmc_data *data)
349 {
350 	if (data->flags & MMC_DATA_WRITE)
351 		return DMA_TO_DEVICE;
352 	else
353 		return DMA_FROM_DEVICE;
354 }
355 
356 #ifdef CONFIG_MMC_DW_IDMAC
357 static void dw_mci_dma_cleanup(struct dw_mci *host)
358 {
359 	struct mmc_data *data = host->data;
360 
361 	if (data)
362 		if (!data->host_cookie)
363 			dma_unmap_sg(host->dev,
364 				     data->sg,
365 				     data->sg_len,
366 				     dw_mci_get_dma_dir(data));
367 }
368 
369 static void dw_mci_idmac_reset(struct dw_mci *host)
370 {
371 	u32 bmod = mci_readl(host, BMOD);
372 	/* Software reset of DMA */
373 	bmod |= SDMMC_IDMAC_SWRESET;
374 	mci_writel(host, BMOD, bmod);
375 }
376 
377 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
378 {
379 	u32 temp;
380 
381 	/* Disable and reset the IDMAC interface */
382 	temp = mci_readl(host, CTRL);
383 	temp &= ~SDMMC_CTRL_USE_IDMAC;
384 	temp |= SDMMC_CTRL_DMA_RESET;
385 	mci_writel(host, CTRL, temp);
386 
387 	/* Stop the IDMAC running */
388 	temp = mci_readl(host, BMOD);
389 	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
390 	temp |= SDMMC_IDMAC_SWRESET;
391 	mci_writel(host, BMOD, temp);
392 }
393 
394 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
395 {
396 	struct mmc_data *data = host->data;
397 
398 	dev_vdbg(host->dev, "DMA complete\n");
399 
400 	host->dma_ops->cleanup(host);
401 
402 	/*
403 	 * If the card was removed, data will be NULL. No point in trying to
404 	 * send the stop command or waiting for NBUSY in this case.
405 	 */
406 	if (data) {
407 		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
408 		tasklet_schedule(&host->tasklet);
409 	}
410 }
411 
412 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
413 				    unsigned int sg_len)
414 {
415 	int i;
416 	struct idmac_desc *desc = host->sg_cpu;
417 
418 	for (i = 0; i < sg_len; i++, desc++) {
419 		unsigned int length = sg_dma_len(&data->sg[i]);
420 		u32 mem_addr = sg_dma_address(&data->sg[i]);
421 
422 		/* Set the OWN bit and disable interrupts for this descriptor */
423 		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
424 
425 		/* Buffer length */
426 		IDMAC_SET_BUFFER1_SIZE(desc, length);
427 
428 		/* Physical address to DMA to/from */
429 		desc->des2 = mem_addr;
430 	}
431 
432 	/* Set first descriptor */
433 	desc = host->sg_cpu;
434 	desc->des0 |= IDMAC_DES0_FD;
435 
436 	/* Set last descriptor */
437 	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
438 	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
439 	desc->des0 |= IDMAC_DES0_LD;
440 
441 	wmb();
442 }
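
/*
 * Editor's sketch of the chain this builds for a 3-entry sglist:
 *
 *   desc[0]  OWN|DIC|CH|FD   des2 -> buf0   des3 -> desc[1]
 *   desc[1]  OWN|DIC|CH      des2 -> buf1   des3 -> desc[2]
 *   desc[2]  OWN|LD          des2 -> buf2
 *
 * CH/DIC are cleared on the last descriptor so the IDMAC raises its
 * completion interrupt there; the des3 forward links were written once at
 * init time by dw_mci_idmac_init().
 */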
443 
444 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
445 {
446 	u32 temp;
447 
448 	dw_mci_translate_sglist(host, host->data, sg_len);
449 
450 	/* Select IDMAC interface */
451 	temp = mci_readl(host, CTRL);
452 	temp |= SDMMC_CTRL_USE_IDMAC;
453 	mci_writel(host, CTRL, temp);
454 
455 	wmb();
456 
457 	/* Enable the IDMAC */
458 	temp = mci_readl(host, BMOD);
459 	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
460 	mci_writel(host, BMOD, temp);
461 
462 	/* Start it running */
463 	mci_writel(host, PLDMND, 1);
464 }
465 
466 static int dw_mci_idmac_init(struct dw_mci *host)
467 {
468 	struct idmac_desc *p;
469 	int i;
470 
471 	/* Number of descriptors in the ring buffer */
472 	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
473 
474 	/* Forward link the descriptor list */
475 	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
476 		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
477 
478 	/* Set the last descriptor as the end-of-ring descriptor */
479 	p->des3 = host->sg_dma;
480 	p->des0 = IDMAC_DES0_ER;
481 
482 	dw_mci_idmac_reset(host);
483 
484 	/* Mask out interrupts - get Tx & Rx complete only */
485 	mci_writel(host, IDSTS, IDMAC_INT_CLR);
486 	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
487 		   SDMMC_IDMAC_INT_TI);
488 
489 	/* Set the descriptor base address */
490 	mci_writel(host, DBADDR, host->sg_dma);
491 	return 0;
492 }
493 
494 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
495 	.init = dw_mci_idmac_init,
496 	.start = dw_mci_idmac_start_dma,
497 	.stop = dw_mci_idmac_stop_dma,
498 	.complete = dw_mci_idmac_complete_dma,
499 	.cleanup = dw_mci_dma_cleanup,
500 };
501 #endif /* CONFIG_MMC_DW_IDMAC */
502 
503 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
504 				   struct mmc_data *data,
505 				   bool next)
506 {
507 	struct scatterlist *sg;
508 	unsigned int i, sg_len;
509 
510 	if (!next && data->host_cookie)
511 		return data->host_cookie;
512 
513 	/*
514 	 * We don't do DMA on "complex" transfers, i.e. with
515 	 * non-word-aligned buffers or lengths. Also, we don't bother
516 	 * with all the DMA setup overhead for short transfers.
517 	 */
518 	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
519 		return -EINVAL;
520 
521 	if (data->blksz & 3)
522 		return -EINVAL;
523 
524 	for_each_sg(data->sg, sg, data->sg_len, i) {
525 		if (sg->offset & 3 || sg->length & 3)
526 			return -EINVAL;
527 	}
528 
529 	sg_len = dma_map_sg(host->dev,
530 			    data->sg,
531 			    data->sg_len,
532 			    dw_mci_get_dma_dir(data));
533 	if (sg_len == 0)
534 		return -EINVAL;
535 
536 	if (next)
537 		data->host_cookie = sg_len;
538 
539 	return sg_len;
540 }
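
/*
 * Editor's note on the host_cookie handshake: dw_mci_pre_req() calls this
 * with next=true to map the sglist ahead of time and caches the mapped
 * length in data->host_cookie.  When the request is actually submitted,
 * the next=false call sees the non-zero cookie and reuses the mapping
 * rather than mapping again; dw_mci_post_req() unmaps and clears it.
 */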
541 
542 static void dw_mci_pre_req(struct mmc_host *mmc,
543 			   struct mmc_request *mrq,
544 			   bool is_first_req)
545 {
546 	struct dw_mci_slot *slot = mmc_priv(mmc);
547 	struct mmc_data *data = mrq->data;
548 
549 	if (!slot->host->use_dma || !data)
550 		return;
551 
552 	if (data->host_cookie) {
553 		data->host_cookie = 0;
554 		return;
555 	}
556 
557 	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
558 		data->host_cookie = 0;
559 }
560 
561 static void dw_mci_post_req(struct mmc_host *mmc,
562 			    struct mmc_request *mrq,
563 			    int err)
564 {
565 	struct dw_mci_slot *slot = mmc_priv(mmc);
566 	struct mmc_data *data = mrq->data;
567 
568 	if (!slot->host->use_dma || !data)
569 		return;
570 
571 	if (data->host_cookie)
572 		dma_unmap_sg(slot->host->dev,
573 			     data->sg,
574 			     data->sg_len,
575 			     dw_mci_get_dma_dir(data));
576 	data->host_cookie = 0;
577 }
578 
579 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
580 {
581 #ifdef CONFIG_MMC_DW_IDMAC
582 	unsigned int blksz = data->blksz;
583 	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
584 	u32 fifo_width = 1 << host->data_shift;
585 	u32 blksz_depth = blksz / fifo_width, fifoth_val;
586 	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
587 	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
588 
589 	tx_wmark = (host->fifo_depth) / 2;
590 	tx_wmark_invers = host->fifo_depth - tx_wmark;
591 
592 	/*
593 	 * MSIZE is '1' if blksz is not
594 	 * a multiple of the FIFO width.
595 	 */
596 	if (blksz % fifo_width) {
597 		msize = 0;
598 		rx_wmark = 1;
599 		goto done;
600 	}
601 
602 	do {
603 		if (!((blksz_depth % mszs[idx]) ||
604 		     (tx_wmark_invers % mszs[idx]))) {
605 			msize = idx;
606 			rx_wmark = mszs[idx] - 1;
607 			break;
608 		}
609 	} while (--idx > 0);
610 	/*
611 	 * If idx reaches '0', no larger burst size matched;
612 	 * thus, the initial values are used.
613 	 */
614 done:
615 	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
616 	mci_writel(host, FIFOTH, fifoth_val);
617 #endif
618 }
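
/*
 * Worked example (editor's illustration): with a 32-bit FIFO
 * (fifo_width = 4), fifo_depth = 32 and blksz = 512, we get
 * blksz_depth = 128 and tx_wmark_invers = 16.  The loop first matches at
 * mszs[3] = 16, so FIFOTH is programmed with MSIZE encoding 3 (bursts of
 * 16 transfers), RX watermark 15 and TX watermark 16.
 */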
619 
620 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
621 {
622 	unsigned int blksz = data->blksz;
623 	u32 blksz_depth, fifo_depth;
624 	u16 thld_size;
625 
626 	WARN_ON(!(data->flags & MMC_DATA_READ));
627 
628 	if (host->timing != MMC_TIMING_MMC_HS200 &&
629 	    host->timing != MMC_TIMING_UHS_SDR104)
630 		goto disable;
631 
632 	blksz_depth = blksz / (1 << host->data_shift);
633 	fifo_depth = host->fifo_depth;
634 
635 	if (blksz_depth > fifo_depth)
636 		goto disable;
637 
638 	/*
639 	 * If blksz_depth >= (fifo_depth >> 1), any thld_size <= blksz works;
640 	 * if blksz_depth <  (fifo_depth >> 1), thld_size must equal blksz.
641 	 * blksz satisfies both cases, so just choose blksz.
642 	 */
643 	thld_size = blksz;
644 	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
645 	return;
646 
647 disable:
648 	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
649 }
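
/*
 * Editor's note: the card read threshold (CDTHRCTL) tells the controller
 * not to start the next block read until at least thld_size bytes of FIFO
 * space are free.  At HS200/SDR104 rates the FIFO may otherwise overrun
 * between blocks, which is why the feature is enabled only for those
 * timings and only when a whole block fits in the FIFO.
 */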
650 
651 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
652 {
653 	int sg_len;
654 	u32 temp;
655 
656 	host->using_dma = 0;
657 
658 	/* If we don't have a channel, we can't do DMA */
659 	if (!host->use_dma)
660 		return -ENODEV;
661 
662 	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
663 	if (sg_len < 0) {
664 		host->dma_ops->stop(host);
665 		return sg_len;
666 	}
667 
668 	host->using_dma = 1;
669 
670 	dev_vdbg(host->dev,
671 		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
672 		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
673 		 sg_len);
674 
675 	/*
676 	 * Decide the MSIZE and RX/TX watermark.
677 	 * If the current block size is the same as the previous one,
678 	 * there is no need to update FIFOTH.
679 	 */
680 	if (host->prev_blksz != data->blksz)
681 		dw_mci_adjust_fifoth(host, data);
682 
683 	/* Enable the DMA interface */
684 	temp = mci_readl(host, CTRL);
685 	temp |= SDMMC_CTRL_DMA_ENABLE;
686 	mci_writel(host, CTRL, temp);
687 
688 	/* Disable RX/TX IRQs, let DMA handle it */
689 	temp = mci_readl(host, INTMASK);
690 	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
691 	mci_writel(host, INTMASK, temp);
692 
693 	host->dma_ops->start(host, sg_len);
694 
695 	return 0;
696 }
697 
698 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
699 {
700 	u32 temp;
701 
702 	data->error = -EINPROGRESS;
703 
704 	WARN_ON(host->data);
705 	host->sg = NULL;
706 	host->data = data;
707 
708 	if (data->flags & MMC_DATA_READ) {
709 		host->dir_status = DW_MCI_RECV_STATUS;
710 		dw_mci_ctrl_rd_thld(host, data);
711 	} else {
712 		host->dir_status = DW_MCI_SEND_STATUS;
713 	}
714 
715 	if (dw_mci_submit_data_dma(host, data)) {
716 		int flags = SG_MITER_ATOMIC;
717 		if (host->data->flags & MMC_DATA_READ)
718 			flags |= SG_MITER_TO_SG;
719 		else
720 			flags |= SG_MITER_FROM_SG;
721 
722 		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
723 		host->sg = data->sg;
724 		host->part_buf_start = 0;
725 		host->part_buf_count = 0;
726 
727 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
728 		temp = mci_readl(host, INTMASK);
729 		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
730 		mci_writel(host, INTMASK, temp);
731 
732 		temp = mci_readl(host, CTRL);
733 		temp &= ~SDMMC_CTRL_DMA_ENABLE;
734 		mci_writel(host, CTRL, temp);
735 
736 		/*
737 		 * Use the initial fifoth_val for PIO mode.
738 		 * Because the next transfer may be handled by DMA,
739 		 * prev_blksz should be invalidated.
740 		 */
741 		mci_writel(host, FIFOTH, host->fifoth_val);
742 		host->prev_blksz = 0;
743 	} else {
744 		/*
745 		 * Keep the current block size.
746 		 * It will be used to decide whether to update
747 		 * fifoth register next time.
748 		 */
749 		host->prev_blksz = data->blksz;
750 	}
751 }
752 
753 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
754 {
755 	struct dw_mci *host = slot->host;
756 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
757 	unsigned int cmd_status = 0;
758 
759 	mci_writel(host, CMDARG, arg);
760 	wmb();
761 	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
762 
763 	while (time_before(jiffies, timeout)) {
764 		cmd_status = mci_readl(host, CMD);
765 		if (!(cmd_status & SDMMC_CMD_START))
766 			return;
767 	}
768 	dev_err(&slot->mmc->class_dev,
769 		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
770 		cmd, arg, cmd_status);
771 }
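
/*
 * Editor's note: commands carrying SDMMC_CMD_UPD_CLK update only the
 * controller's clock domain and are never forwarded to the card.  The CIU
 * clears SDMMC_CMD_START once the new CLKDIV/CLKENA settings are latched,
 * which is the condition the polling loop above waits for.
 */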
772 
773 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
774 {
775 	struct dw_mci *host = slot->host;
776 	unsigned int clock = slot->clock;
777 	u32 div;
778 	u32 clk_en_a;
779 
780 	if (!clock) {
781 		mci_writel(host, CLKENA, 0);
782 		mci_send_cmd(slot,
783 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
784 	} else if (clock != host->current_speed || force_clkinit) {
785 		div = host->bus_hz / clock;
786 		if (host->bus_hz % clock && host->bus_hz > clock)
787 			/*
788 			 * Round the divider up rather than down so we
789 			 * never over-clock the card.
790 			 */
791 			div += 1;
792 
793 		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
794 
795 		if ((clock << div) != slot->__clk_old || force_clkinit)
796 			dev_info(&slot->mmc->class_dev,
797 				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
798 				 slot->id, host->bus_hz, clock,
799 				 div ? ((host->bus_hz / div) >> 1) :
800 				 host->bus_hz, div);
801 
802 		/* disable clock */
803 		mci_writel(host, CLKENA, 0);
804 		mci_writel(host, CLKSRC, 0);
805 
806 		/* inform CIU */
807 		mci_send_cmd(slot,
808 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
809 
810 		/* set clock to desired speed */
811 		mci_writel(host, CLKDIV, div);
812 
813 		/* inform CIU */
814 		mci_send_cmd(slot,
815 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816 
817 		/* enable clock; only low power if no SDIO */
818 		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
819 		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
820 			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
821 		mci_writel(host, CLKENA, clk_en_a);
822 
823 		/* inform CIU */
824 		mci_send_cmd(slot,
825 			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
826 
827 		/* save the clock with the clock divider reflected */
828 		slot->__clk_old = clock << div;
829 	}
830 
831 	host->current_speed = clock;
832 
833 	/* Set the current slot bus width */
834 	mci_writel(host, CTYPE, (slot->ctype << slot->id));
835 }
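
/*
 * Divider example (editor's illustration): with bus_hz = 100 MHz and a
 * requested clock of 400 kHz, the raw div is 250 and CLKDIV is programmed
 * with DIV_ROUND_UP(250, 2) = 125, giving a card clock of
 * bus_hz / (2 * 125) = 400 kHz.  CLKDIV = 0 bypasses the divider, which
 * is why div stays 0 when bus_hz == clock.
 */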
836 
837 static void __dw_mci_start_request(struct dw_mci *host,
838 				   struct dw_mci_slot *slot,
839 				   struct mmc_command *cmd)
840 {
841 	struct mmc_request *mrq;
842 	struct mmc_data	*data;
843 	u32 cmdflags;
844 
845 	mrq = slot->mrq;
846 
847 	host->cur_slot = slot;
848 	host->mrq = mrq;
849 
850 	host->pending_events = 0;
851 	host->completed_events = 0;
852 	host->cmd_status = 0;
853 	host->data_status = 0;
854 	host->dir_status = 0;
855 
856 	data = cmd->data;
857 	if (data) {
858 		mci_writel(host, TMOUT, 0xFFFFFFFF);
859 		mci_writel(host, BYTCNT, data->blksz*data->blocks);
860 		mci_writel(host, BLKSIZ, data->blksz);
861 	}
862 
863 	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
864 
865 	/* if this is the first command, send the initialization clock */
866 	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
867 		cmdflags |= SDMMC_CMD_INIT;
868 
869 	if (data) {
870 		dw_mci_submit_data(host, data);
871 		wmb();
872 	}
873 
874 	dw_mci_start_command(host, cmd, cmdflags);
875 
876 	if (mrq->stop)
877 		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
878 	else
879 		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
880 }
881 
882 static void dw_mci_start_request(struct dw_mci *host,
883 				 struct dw_mci_slot *slot)
884 {
885 	struct mmc_request *mrq = slot->mrq;
886 	struct mmc_command *cmd;
887 
888 	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
889 	__dw_mci_start_request(host, slot, cmd);
890 }
891 
892 /* must be called with host->lock held */
893 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
894 				 struct mmc_request *mrq)
895 {
896 	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
897 		 host->state);
898 
899 	slot->mrq = mrq;
900 
901 	if (host->state == STATE_IDLE) {
902 		host->state = STATE_SENDING_CMD;
903 		dw_mci_start_request(host, slot);
904 	} else {
905 		list_add_tail(&slot->queue_node, &host->queue);
906 	}
907 }
908 
909 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
910 {
911 	struct dw_mci_slot *slot = mmc_priv(mmc);
912 	struct dw_mci *host = slot->host;
913 
914 	WARN_ON(slot->mrq);
915 
916 	/*
917 	 * The check for card presence and queueing of the request must be
918 	 * atomic, otherwise the card could be removed in between and the
919 	 * request wouldn't fail until another card was inserted.
920 	 */
921 	spin_lock_bh(&host->lock);
922 
923 	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
924 		spin_unlock_bh(&host->lock);
925 		mrq->cmd->error = -ENOMEDIUM;
926 		mmc_request_done(mmc, mrq);
927 		return;
928 	}
929 
930 	dw_mci_queue_request(host, slot, mrq);
931 
932 	spin_unlock_bh(&host->lock);
933 }
934 
935 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
936 {
937 	struct dw_mci_slot *slot = mmc_priv(mmc);
938 	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
939 	u32 regs;
940 
941 	switch (ios->bus_width) {
942 	case MMC_BUS_WIDTH_4:
943 		slot->ctype = SDMMC_CTYPE_4BIT;
944 		break;
945 	case MMC_BUS_WIDTH_8:
946 		slot->ctype = SDMMC_CTYPE_8BIT;
947 		break;
948 	default:
949 		/* set default 1 bit mode */
950 		slot->ctype = SDMMC_CTYPE_1BIT;
951 	}
952 
953 	regs = mci_readl(slot->host, UHS_REG);
954 
955 	/* DDR mode set */
956 	if (ios->timing == MMC_TIMING_MMC_DDR52)
957 		regs |= ((0x1 << slot->id) << 16);
958 	else
959 		regs &= ~((0x1 << slot->id) << 16);
960 
961 	mci_writel(slot->host, UHS_REG, regs);
962 	slot->host->timing = ios->timing;
963 
964 	/*
965 	 * Use mirror of ios->clock to prevent race with mmc
966 	 * core ios update when finding the minimum.
967 	 */
968 	slot->clock = ios->clock;
969 
970 	if (drv_data && drv_data->set_ios)
971 		drv_data->set_ios(slot->host, ios);
972 
973 	/* Slot specific timing and width adjustment */
974 	dw_mci_setup_bus(slot, false);
975 
976 	switch (ios->power_mode) {
977 	case MMC_POWER_UP:
978 		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
979 		regs = mci_readl(slot->host, PWREN);
980 		regs |= (1 << slot->id);
981 		mci_writel(slot->host, PWREN, regs);
982 		break;
983 	case MMC_POWER_OFF:
984 		regs = mci_readl(slot->host, PWREN);
985 		regs &= ~(1 << slot->id);
986 		mci_writel(slot->host, PWREN, regs);
987 		break;
988 	default:
989 		break;
990 	}
991 }
992 
993 static int dw_mci_get_ro(struct mmc_host *mmc)
994 {
995 	int read_only;
996 	struct dw_mci_slot *slot = mmc_priv(mmc);
997 	int gpio_ro = mmc_gpio_get_ro(mmc);
998 
999 	/* Honor the disable-wp quirk or WP GPIO, else read the WRTPRT register */
1000 	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1001 		read_only = 0;
1002 	else if (!IS_ERR_VALUE(gpio_ro))
1003 		read_only = gpio_ro;
1004 	else
1005 		read_only =
1006 			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1007 
1008 	dev_dbg(&mmc->class_dev, "card is %s\n",
1009 		read_only ? "read-only" : "read-write");
1010 
1011 	return read_only;
1012 }
1013 
1014 static int dw_mci_get_cd(struct mmc_host *mmc)
1015 {
1016 	int present;
1017 	struct dw_mci_slot *slot = mmc_priv(mmc);
1018 	struct dw_mci_board *brd = slot->host->pdata;
1019 	struct dw_mci *host = slot->host;
1020 	int gpio_cd = mmc_gpio_get_cd(mmc);
1021 
1022 	/* Honor the broken-CD quirk or CD GPIO, else read the CDETECT register */
1023 	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1024 		present = 1;
1025 	else if (!IS_ERR_VALUE(gpio_cd))
1026 		present = gpio_cd;
1027 	else
1028 		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1029 			== 0 ? 1 : 0;
1030 
1031 	spin_lock_bh(&host->lock);
1032 	if (present) {
1033 		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1034 		dev_dbg(&mmc->class_dev, "card is present\n");
1035 	} else {
1036 		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1037 		dev_dbg(&mmc->class_dev, "card is not present\n");
1038 	}
1039 	spin_unlock_bh(&host->lock);
1040 
1041 	return present;
1042 }
1043 
1044 /*
1045  * Disable low power mode.
1046  *
1047  * Low power mode will stop the card clock when idle.  According to the
1048  * description of the CLKENA register we should disable low power mode
1049  * for SDIO cards if we need SDIO interrupts to work.
1050  *
1051  * This function is fast if low power mode is already disabled.
1052  */
1053 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1054 {
1055 	struct dw_mci *host = slot->host;
1056 	u32 clk_en_a;
1057 	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1058 
1059 	clk_en_a = mci_readl(host, CLKENA);
1060 
1061 	if (clk_en_a & clken_low_pwr) {
1062 		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1063 		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1064 			     SDMMC_CMD_PRV_DAT_WAIT, 0);
1065 	}
1066 }
1067 
1068 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1069 {
1070 	struct dw_mci_slot *slot = mmc_priv(mmc);
1071 	struct dw_mci *host = slot->host;
1072 	u32 int_mask;
1073 
1074 	/* Enable/disable Slot Specific SDIO interrupt */
1075 	int_mask = mci_readl(host, INTMASK);
1076 	if (enb) {
1077 		/*
1078 		 * Turn off low power mode if it was enabled.  This is a bit of
1079 		 * a heavy operation and we disable / enable IRQs a lot, so
1080 		 * we'll leave low power mode disabled and it will get
1081 		 * re-enabled again in dw_mci_setup_bus().
1082 		 */
1083 		dw_mci_disable_low_power(slot);
1084 
1085 		mci_writel(host, INTMASK,
1086 			   (int_mask | SDMMC_INT_SDIO(slot->id)));
1087 	} else {
1088 		mci_writel(host, INTMASK,
1089 			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1090 	}
1091 }
1092 
1093 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1094 {
1095 	struct dw_mci_slot *slot = mmc_priv(mmc);
1096 	struct dw_mci *host = slot->host;
1097 	const struct dw_mci_drv_data *drv_data = host->drv_data;
1098 	struct dw_mci_tuning_data tuning_data;
1099 	int err = -ENOSYS;
1100 
1101 	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1102 		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1103 			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1104 			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1105 		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1106 			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1107 			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1108 		} else {
1109 			return -EINVAL;
1110 		}
1111 	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
1112 		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1113 		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1114 	} else {
1115 		dev_err(host->dev,
1116 			"Undefined command(%d) for tuning\n", opcode);
1117 		return -EINVAL;
1118 	}
1119 
1120 	if (drv_data && drv_data->execute_tuning)
1121 		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1122 	return err;
1123 }
1124 
1125 static const struct mmc_host_ops dw_mci_ops = {
1126 	.request		= dw_mci_request,
1127 	.pre_req		= dw_mci_pre_req,
1128 	.post_req		= dw_mci_post_req,
1129 	.set_ios		= dw_mci_set_ios,
1130 	.get_ro			= dw_mci_get_ro,
1131 	.get_cd			= dw_mci_get_cd,
1132 	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
1133 	.execute_tuning		= dw_mci_execute_tuning,
1134 };
1135 
1136 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1137 	__releases(&host->lock)
1138 	__acquires(&host->lock)
1139 {
1140 	struct dw_mci_slot *slot;
1141 	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
1142 
1143 	WARN_ON(host->cmd || host->data);
1144 
1145 	host->cur_slot->mrq = NULL;
1146 	host->mrq = NULL;
1147 	if (!list_empty(&host->queue)) {
1148 		slot = list_entry(host->queue.next,
1149 				  struct dw_mci_slot, queue_node);
1150 		list_del(&slot->queue_node);
1151 		dev_vdbg(host->dev, "list not empty: %s is next\n",
1152 			 mmc_hostname(slot->mmc));
1153 		host->state = STATE_SENDING_CMD;
1154 		dw_mci_start_request(host, slot);
1155 	} else {
1156 		dev_vdbg(host->dev, "list empty\n");
1157 		host->state = STATE_IDLE;
1158 	}
1159 
1160 	spin_unlock(&host->lock);
1161 	mmc_request_done(prev_mmc, mrq);
1162 	spin_lock(&host->lock);
1163 }
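
/*
 * Editor's note: the unlock/relock around mmc_request_done() above is
 * deliberate.  The completion callback may immediately issue another
 * request against this host, and re-entering dw_mci_request() with
 * host->lock held would deadlock; the __releases/__acquires annotations
 * document the pattern for sparse.
 */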
1164 
1165 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1166 {
1167 	u32 status = host->cmd_status;
1168 
1169 	host->cmd_status = 0;
1170 
1171 	/* Read the response from the card (up to 16 bytes) */
1172 	if (cmd->flags & MMC_RSP_PRESENT) {
1173 		if (cmd->flags & MMC_RSP_136) {
1174 			cmd->resp[3] = mci_readl(host, RESP0);
1175 			cmd->resp[2] = mci_readl(host, RESP1);
1176 			cmd->resp[1] = mci_readl(host, RESP2);
1177 			cmd->resp[0] = mci_readl(host, RESP3);
1178 		} else {
1179 			cmd->resp[0] = mci_readl(host, RESP0);
1180 			cmd->resp[1] = 0;
1181 			cmd->resp[2] = 0;
1182 			cmd->resp[3] = 0;
1183 		}
1184 	}
1185 
1186 	if (status & SDMMC_INT_RTO)
1187 		cmd->error = -ETIMEDOUT;
1188 	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1189 		cmd->error = -EILSEQ;
1190 	else if (status & SDMMC_INT_RESP_ERR)
1191 		cmd->error = -EIO;
1192 	else
1193 		cmd->error = 0;
1194 
1195 	if (cmd->error) {
1196 		/* newer IP versions need a delay between retries */
1197 		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1198 			mdelay(20);
1199 	}
1200 
1201 	return cmd->error;
1202 }
1203 
1204 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1205 {
1206 	u32 status = host->data_status;
1207 
1208 	if (status & DW_MCI_DATA_ERROR_FLAGS) {
1209 		if (status & SDMMC_INT_DRTO) {
1210 			data->error = -ETIMEDOUT;
1211 		} else if (status & SDMMC_INT_DCRC) {
1212 			data->error = -EILSEQ;
1213 		} else if (status & SDMMC_INT_EBE) {
1214 			if (host->dir_status ==
1215 				DW_MCI_SEND_STATUS) {
1216 				/*
1217 				 * No data CRC status was returned.
1218 				 * The number of bytes transferred
1219 				 * will be exaggerated in PIO mode.
1220 				 */
1221 				data->bytes_xfered = 0;
1222 				data->error = -ETIMEDOUT;
1223 			} else if (host->dir_status ==
1224 					DW_MCI_RECV_STATUS) {
1225 				data->error = -EIO;
1226 			}
1227 		} else {
1228 			/* covers the remaining bits, SDMMC_INT_SBE included */
1229 			data->error = -EIO;
1230 		}
1231 
1232 		dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1233 
1234 		/*
1235 		 * After an error, there may be data lingering
1236 		 * in the FIFO
1237 		 */
1238 		dw_mci_fifo_reset(host);
1239 	} else {
1240 		data->bytes_xfered = data->blocks * data->blksz;
1241 		data->error = 0;
1242 	}
1243 
1244 	return data->error;
1245 }
1246 
1247 static void dw_mci_tasklet_func(unsigned long priv)
1248 {
1249 	struct dw_mci *host = (struct dw_mci *)priv;
1250 	struct mmc_data	*data;
1251 	struct mmc_command *cmd;
1252 	struct mmc_request *mrq;
1253 	enum dw_mci_state state;
1254 	enum dw_mci_state prev_state;
1255 	unsigned int err;
1256 
1257 	spin_lock(&host->lock);
1258 
1259 	state = host->state;
1260 	data = host->data;
1261 	mrq = host->mrq;
1262 
1263 	do {
1264 		prev_state = state;
1265 
1266 		switch (state) {
1267 		case STATE_IDLE:
1268 			break;
1269 
1270 		case STATE_SENDING_CMD:
1271 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1272 						&host->pending_events))
1273 				break;
1274 
1275 			cmd = host->cmd;
1276 			host->cmd = NULL;
1277 			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1278 			err = dw_mci_command_complete(host, cmd);
1279 			if (cmd == mrq->sbc && !err) {
1280 				prev_state = state = STATE_SENDING_CMD;
1281 				__dw_mci_start_request(host, host->cur_slot,
1282 						       mrq->cmd);
1283 				goto unlock;
1284 			}
1285 
1286 			if (cmd->data && err) {
1287 				dw_mci_stop_dma(host);
1288 				send_stop_abort(host, data);
1289 				state = STATE_SENDING_STOP;
1290 				break;
1291 			}
1292 
1293 			if (!cmd->data || err) {
1294 				dw_mci_request_end(host, mrq);
1295 				goto unlock;
1296 			}
1297 
1298 			prev_state = state = STATE_SENDING_DATA;
1299 			/* fall through */
1300 
1301 		case STATE_SENDING_DATA:
1302 			if (test_and_clear_bit(EVENT_DATA_ERROR,
1303 					       &host->pending_events)) {
1304 				dw_mci_stop_dma(host);
1305 				send_stop_abort(host, data);
1306 				state = STATE_DATA_ERROR;
1307 				break;
1308 			}
1309 
1310 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1311 						&host->pending_events))
1312 				break;
1313 
1314 			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1315 			prev_state = state = STATE_DATA_BUSY;
1316 			/* fall through */
1317 
1318 		case STATE_DATA_BUSY:
1319 			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1320 						&host->pending_events))
1321 				break;
1322 
1323 			host->data = NULL;
1324 			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1325 			err = dw_mci_data_complete(host, data);
1326 
1327 			if (!err) {
1328 				if (!data->stop || mrq->sbc) {
1329 					if (mrq->sbc && data->stop)
1330 						data->stop->error = 0;
1331 					dw_mci_request_end(host, mrq);
1332 					goto unlock;
1333 				}
1334 
1335 				/* stop command for open-ended transfer */
1336 				if (data->stop)
1337 					send_stop_abort(host, data);
1338 			}
1339 
1340 			/*
1341 			 * If err is non-zero, the stop/abort command
1342 			 * has already been issued.
1343 			 */
1344 			prev_state = state = STATE_SENDING_STOP;
1345 
1346 			/* fall through */
1347 
1348 		case STATE_SENDING_STOP:
1349 			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1350 						&host->pending_events))
1351 				break;
1352 
1353 			/* CMD error in data command */
1354 			if (mrq->cmd->error && mrq->data)
1355 				dw_mci_fifo_reset(host);
1356 
1357 			host->cmd = NULL;
1358 			host->data = NULL;
1359 
1360 			if (mrq->stop)
1361 				dw_mci_command_complete(host, mrq->stop);
1362 			else
1363 				host->cmd_status = 0;
1364 
1365 			dw_mci_request_end(host, mrq);
1366 			goto unlock;
1367 
1368 		case STATE_DATA_ERROR:
1369 			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1370 						&host->pending_events))
1371 				break;
1372 
1373 			state = STATE_DATA_BUSY;
1374 			break;
1375 		}
1376 	} while (state != prev_state);
1377 
1378 	host->state = state;
1379 unlock:
1380 	spin_unlock(&host->lock);
1381 
1382 }
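
/*
 * State machine summary (editor's sketch):
 *
 *   IDLE -> SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> IDLE
 *
 * with error paths detouring through STATE_DATA_ERROR before rejoining at
 * DATA_BUSY.  Transitions are driven by the EVENT_* bits the interrupt
 * handler sets in host->pending_events; the do/while loop keeps consuming
 * state changes until a pass completes with no transition.
 */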
1383 
1384 /* push final bytes to part_buf, only use during push */
1385 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1386 {
1387 	memcpy((void *)&host->part_buf, buf, cnt);
1388 	host->part_buf_count = cnt;
1389 }
1390 
1391 /* append bytes to part_buf, only use during push */
1392 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1393 {
1394 	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1395 	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1396 	host->part_buf_count += cnt;
1397 	return cnt;
1398 }
1399 
1400 /* pull first bytes from part_buf, only use during pull */
1401 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1402 {
1403 	cnt = min(cnt, (int)host->part_buf_count);
1404 	if (cnt) {
1405 		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1406 		       cnt);
1407 		host->part_buf_count -= cnt;
1408 		host->part_buf_start += cnt;
1409 	}
1410 	return cnt;
1411 }
1412 
1413 /* pull final bytes from the part_buf, assuming it's just been filled */
1414 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1415 {
1416 	memcpy(buf, &host->part_buf, cnt);
1417 	host->part_buf_start = cnt;
1418 	host->part_buf_count = (1 << host->data_shift) - cnt;
1419 }
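
/*
 * Editor's recap of part_buf: the data FIFO can only be accessed in units
 * of the host data width (2, 4 or 8 bytes), so the push helpers below
 * stage trailing odd bytes in host->part_buf until a full word
 * accumulates, and the pull helpers park the unconsumed tail of the last
 * FIFO word there for the next call.
 */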
1420 
1421 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1422 {
1423 	struct mmc_data *data = host->data;
1424 	int init_cnt = cnt;
1425 
1426 	/* try and push anything in the part_buf */
1427 	if (unlikely(host->part_buf_count)) {
1428 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1429 		buf += len;
1430 		cnt -= len;
1431 		if (host->part_buf_count == 2) {
1432 			mci_writew(host, DATA(host->data_offset),
1433 					host->part_buf16);
1434 			host->part_buf_count = 0;
1435 		}
1436 	}
1437 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1438 	if (unlikely((unsigned long)buf & 0x1)) {
1439 		while (cnt >= 2) {
1440 			u16 aligned_buf[64];
1441 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1442 			int items = len >> 1;
1443 			int i;
1444 			/* memcpy from input buffer into aligned buffer */
1445 			memcpy(aligned_buf, buf, len);
1446 			buf += len;
1447 			cnt -= len;
1448 			/* push data from aligned buffer into fifo */
1449 			for (i = 0; i < items; ++i)
1450 				mci_writew(host, DATA(host->data_offset),
1451 						aligned_buf[i]);
1452 		}
1453 	} else
1454 #endif
1455 	{
1456 		u16 *pdata = buf;
1457 		for (; cnt >= 2; cnt -= 2)
1458 			mci_writew(host, DATA(host->data_offset), *pdata++);
1459 		buf = pdata;
1460 	}
1461 	/* put anything remaining in the part_buf */
1462 	if (cnt) {
1463 		dw_mci_set_part_bytes(host, buf, cnt);
1464 		 /* Push data if we have reached the expected data length */
1465 		if ((data->bytes_xfered + init_cnt) ==
1466 		    (data->blksz * data->blocks))
1467 			mci_writew(host, DATA(host->data_offset),
1468 				   host->part_buf16);
1469 	}
1470 }
1471 
1472 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1473 {
1474 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1475 	if (unlikely((unsigned long)buf & 0x1)) {
1476 		while (cnt >= 2) {
1477 			/* pull data from fifo into aligned buffer */
1478 			u16 aligned_buf[64];
1479 			int len = min(cnt & -2, (int)sizeof(aligned_buf));
1480 			int items = len >> 1;
1481 			int i;
1482 			for (i = 0; i < items; ++i)
1483 				aligned_buf[i] = mci_readw(host,
1484 						DATA(host->data_offset));
1485 			/* memcpy from aligned buffer into output buffer */
1486 			memcpy(buf, aligned_buf, len);
1487 			buf += len;
1488 			cnt -= len;
1489 		}
1490 	} else
1491 #endif
1492 	{
1493 		u16 *pdata = buf;
1494 		for (; cnt >= 2; cnt -= 2)
1495 			*pdata++ = mci_readw(host, DATA(host->data_offset));
1496 		buf = pdata;
1497 	}
1498 	if (cnt) {
1499 		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1500 		dw_mci_pull_final_bytes(host, buf, cnt);
1501 	}
1502 }
1503 
1504 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1505 {
1506 	struct mmc_data *data = host->data;
1507 	int init_cnt = cnt;
1508 
1509 	/* try and push anything in the part_buf */
1510 	if (unlikely(host->part_buf_count)) {
1511 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1512 		buf += len;
1513 		cnt -= len;
1514 		if (host->part_buf_count == 4) {
1515 			mci_writel(host, DATA(host->data_offset),
1516 					host->part_buf32);
1517 			host->part_buf_count = 0;
1518 		}
1519 	}
1520 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1521 	if (unlikely((unsigned long)buf & 0x3)) {
1522 		while (cnt >= 4) {
1523 			u32 aligned_buf[32];
1524 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1525 			int items = len >> 2;
1526 			int i;
1527 			/* memcpy from input buffer into aligned buffer */
1528 			memcpy(aligned_buf, buf, len);
1529 			buf += len;
1530 			cnt -= len;
1531 			/* push data from aligned buffer into fifo */
1532 			for (i = 0; i < items; ++i)
1533 				mci_writel(host, DATA(host->data_offset),
1534 						aligned_buf[i]);
1535 		}
1536 	} else
1537 #endif
1538 	{
1539 		u32 *pdata = buf;
1540 		for (; cnt >= 4; cnt -= 4)
1541 			mci_writel(host, DATA(host->data_offset), *pdata++);
1542 		buf = pdata;
1543 	}
1544 	/* put anything remaining in the part_buf */
1545 	if (cnt) {
1546 		dw_mci_set_part_bytes(host, buf, cnt);
1547 		 /* Push data if we have reached the expected data length */
1548 		if ((data->bytes_xfered + init_cnt) ==
1549 		    (data->blksz * data->blocks))
1550 			mci_writel(host, DATA(host->data_offset),
1551 				   host->part_buf32);
1552 	}
1553 }
1554 
1555 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1556 {
1557 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1558 	if (unlikely((unsigned long)buf & 0x3)) {
1559 		while (cnt >= 4) {
1560 			/* pull data from fifo into aligned buffer */
1561 			u32 aligned_buf[32];
1562 			int len = min(cnt & -4, (int)sizeof(aligned_buf));
1563 			int items = len >> 2;
1564 			int i;
1565 			for (i = 0; i < items; ++i)
1566 				aligned_buf[i] = mci_readl(host,
1567 						DATA(host->data_offset));
1568 			/* memcpy from aligned buffer into output buffer */
1569 			memcpy(buf, aligned_buf, len);
1570 			buf += len;
1571 			cnt -= len;
1572 		}
1573 	} else
1574 #endif
1575 	{
1576 		u32 *pdata = buf;
1577 		for (; cnt >= 4; cnt -= 4)
1578 			*pdata++ = mci_readl(host, DATA(host->data_offset));
1579 		buf = pdata;
1580 	}
1581 	if (cnt) {
1582 		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
1583 		dw_mci_pull_final_bytes(host, buf, cnt);
1584 	}
1585 }
1586 
1587 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1588 {
1589 	struct mmc_data *data = host->data;
1590 	int init_cnt = cnt;
1591 
1592 	/* try and push anything in the part_buf */
1593 	if (unlikely(host->part_buf_count)) {
1594 		int len = dw_mci_push_part_bytes(host, buf, cnt);
1595 		buf += len;
1596 		cnt -= len;
1597 
1598 		if (host->part_buf_count == 8) {
1599 			mci_writeq(host, DATA(host->data_offset),
1600 					host->part_buf);
1601 			host->part_buf_count = 0;
1602 		}
1603 	}
1604 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1605 	if (unlikely((unsigned long)buf & 0x7)) {
1606 		while (cnt >= 8) {
1607 			u64 aligned_buf[16];
1608 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1609 			int items = len >> 3;
1610 			int i;
1611 			/* memcpy from input buffer into aligned buffer */
1612 			memcpy(aligned_buf, buf, len);
1613 			buf += len;
1614 			cnt -= len;
1615 			/* push data from aligned buffer into fifo */
1616 			for (i = 0; i < items; ++i)
1617 				mci_writeq(host, DATA(host->data_offset),
1618 						aligned_buf[i]);
1619 		}
1620 	} else
1621 #endif
1622 	{
1623 		u64 *pdata = buf;
1624 		for (; cnt >= 8; cnt -= 8)
1625 			mci_writeq(host, DATA(host->data_offset), *pdata++);
1626 		buf = pdata;
1627 	}
1628 	/* put anything remaining in the part_buf */
1629 	if (cnt) {
1630 		dw_mci_set_part_bytes(host, buf, cnt);
1631 		/* Push data if we have reached the expected data length */
1632 		if ((data->bytes_xfered + init_cnt) ==
1633 		    (data->blksz * data->blocks))
1634 			mci_writeq(host, DATA(host->data_offset),
1635 				   host->part_buf);
1636 	}
1637 }
1638 
1639 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1640 {
1641 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1642 	if (unlikely((unsigned long)buf & 0x7)) {
1643 		while (cnt >= 8) {
1644 			/* pull data from fifo into aligned buffer */
1645 			u64 aligned_buf[16];
1646 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
1647 			int items = len >> 3;
1648 			int i;
1649 			for (i = 0; i < items; ++i)
1650 				aligned_buf[i] = mci_readq(host,
1651 						DATA(host->data_offset));
1652 			/* memcpy from aligned buffer into output buffer */
1653 			memcpy(buf, aligned_buf, len);
1654 			buf += len;
1655 			cnt -= len;
1656 		}
1657 	} else
1658 #endif
1659 	{
1660 		u64 *pdata = buf;
1661 		for (; cnt >= 8; cnt -= 8)
1662 			*pdata++ = mci_readq(host, DATA(host->data_offset));
1663 		buf = pdata;
1664 	}
1665 	if (cnt) {
1666 		host->part_buf = mci_readq(host, DATA(host->data_offset));
1667 		dw_mci_pull_final_bytes(host, buf, cnt);
1668 	}
1669 }
1670 
1671 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1672 {
1673 	int len;
1674 
1675 	/* get remaining partial bytes */
1676 	len = dw_mci_pull_part_bytes(host, buf, cnt);
1677 	if (unlikely(len == cnt))
1678 		return;
1679 	buf += len;
1680 	cnt -= len;
1681 
1682 	/* get the rest of the data */
1683 	host->pull_data(host, buf, cnt);
1684 }
1685 
1686 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
1687 {
1688 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1689 	void *buf;
1690 	unsigned int offset;
1691 	struct mmc_data	*data = host->data;
1692 	int shift = host->data_shift;
1693 	u32 status;
1694 	unsigned int len;
1695 	unsigned int remain, fcnt;
1696 
1697 	do {
1698 		if (!sg_miter_next(sg_miter))
1699 			goto done;
1700 
1701 		host->sg = sg_miter->piter.sg;
1702 		buf = sg_miter->addr;
1703 		remain = sg_miter->length;
1704 		offset = 0;
1705 
1706 		do {
1707 			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1708 					<< shift) + host->part_buf_count;
1709 			len = min(remain, fcnt);
1710 			if (!len)
1711 				break;
1712 			dw_mci_pull_data(host, (void *)(buf + offset), len);
1713 			data->bytes_xfered += len;
1714 			offset += len;
1715 			remain -= len;
1716 		} while (remain);
1717 
1718 		sg_miter->consumed = offset;
1719 		status = mci_readl(host, MINTSTS);
1720 		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1721 	/* if the RXDR is ready, read again */
1722 	} while ((status & SDMMC_INT_RXDR) ||
1723 		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
1724 
1725 	if (!remain) {
1726 		if (!sg_miter_next(sg_miter))
1727 			goto done;
1728 		sg_miter->consumed = 0;
1729 	}
1730 	sg_miter_stop(sg_miter);
1731 	return;
1732 
1733 done:
1734 	sg_miter_stop(sg_miter);
1735 	host->sg = NULL;
1736 	smp_wmb();
1737 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1738 }
1739 
1740 static void dw_mci_write_data_pio(struct dw_mci *host)
1741 {
1742 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
1743 	void *buf;
1744 	unsigned int offset;
1745 	struct mmc_data	*data = host->data;
1746 	int shift = host->data_shift;
1747 	u32 status;
1748 	unsigned int len;
1749 	unsigned int fifo_depth = host->fifo_depth;
1750 	unsigned int remain, fcnt;
1751 
1752 	do {
1753 		if (!sg_miter_next(sg_miter))
1754 			goto done;
1755 
1756 		host->sg = sg_miter->piter.sg;
1757 		buf = sg_miter->addr;
1758 		remain = sg_miter->length;
1759 		offset = 0;
1760 
1761 		do {
1762 			fcnt = ((fifo_depth -
1763 				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1764 					<< shift) - host->part_buf_count;
1765 			len = min(remain, fcnt);
1766 			if (!len)
1767 				break;
1768 			host->push_data(host, (void *)(buf + offset), len);
1769 			data->bytes_xfered += len;
1770 			offset += len;
1771 			remain -= len;
1772 		} while (remain);
1773 
1774 		sg_miter->consumed = offset;
1775 		status = mci_readl(host, MINTSTS);
1776 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1777 	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1778 
1779 	if (!remain) {
1780 		if (!sg_miter_next(sg_miter))
1781 			goto done;
1782 		sg_miter->consumed = 0;
1783 	}
1784 	sg_miter_stop(sg_miter);
1785 	return;
1786 
1787 done:
1788 	sg_miter_stop(sg_miter);
1789 	host->sg = NULL;
1790 	smp_wmb();
1791 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1792 }
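
/*
 * Editor's note: the two PIO loops mirror each other.  The read path
 * drains up to SDMMC_GET_FCNT() words already sitting in the FIFO, while
 * the write path fills at most (fifo_depth - FCNT) free slots; both
 * adjust for the partial word held in part_buf.
 */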
1793 
1794 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1795 {
1796 	if (!host->cmd_status)
1797 		host->cmd_status = status;
1798 
1799 	smp_wmb();
1800 
1801 	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1802 	tasklet_schedule(&host->tasklet);
1803 }
1804 
1805 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1806 {
1807 	struct dw_mci *host = dev_id;
1808 	u32 pending;
1809 	int i;
1810 
1811 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1812 
1813 	/*
1814 	 * DTO fix - version 2.10a and below, and only if internal DMA
1815 	 * is configured.
1816 	 */
1817 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1818 		if (!pending &&
1819 		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1820 			pending |= SDMMC_INT_DATA_OVER;
1821 	}
1822 
1823 	if (pending) {
1824 		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1825 			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1826 			host->cmd_status = pending;
1827 			smp_wmb();
1828 			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1829 		}
1830 
1831 		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1832 			/* if there is an error, report DATA_ERROR */
1833 			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1834 			host->data_status = pending;
1835 			smp_wmb();
1836 			set_bit(EVENT_DATA_ERROR, &host->pending_events);
1837 			tasklet_schedule(&host->tasklet);
1838 		}
1839 
1840 		if (pending & SDMMC_INT_DATA_OVER) {
1841 			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1842 			if (!host->data_status)
1843 				host->data_status = pending;
1844 			smp_wmb();
1845 			if (host->dir_status == DW_MCI_RECV_STATUS) {
1846 				if (host->sg != NULL)
1847 					dw_mci_read_data_pio(host, true);
1848 			}
1849 			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1850 			tasklet_schedule(&host->tasklet);
1851 		}
1852 
1853 		if (pending & SDMMC_INT_RXDR) {
1854 			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1855 			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
1856 				dw_mci_read_data_pio(host, false);
1857 		}
1858 
1859 		if (pending & SDMMC_INT_TXDR) {
1860 			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1861 			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
1862 				dw_mci_write_data_pio(host);
1863 		}
1864 
1865 		if (pending & SDMMC_INT_CMD_DONE) {
1866 			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1867 			dw_mci_cmd_interrupt(host, pending);
1868 		}
1869 
1870 		if (pending & SDMMC_INT_CD) {
1871 			mci_writel(host, RINTSTS, SDMMC_INT_CD);
1872 			queue_work(host->card_workqueue, &host->card_work);
1873 		}
1874 
1875 		/* Handle SDIO Interrupts */
1876 		for (i = 0; i < host->num_slots; i++) {
1877 			struct dw_mci_slot *slot = host->slot[i];
1878 			if (pending & SDMMC_INT_SDIO(i)) {
1879 				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1880 				mmc_signal_sdio_irq(slot->mmc);
1881 			}
1882 		}
1883 
1884 	}
1885 
1886 #ifdef CONFIG_MMC_DW_IDMAC
1887 	/* Handle DMA interrupts */
1888 	pending = mci_readl(host, IDSTS);
1889 	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1890 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1891 		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1892 		host->dma_ops->complete(host);
1893 	}
1894 #endif
1895 
1896 	return IRQ_HANDLED;
1897 }
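
/*
 * Editor's note on the register pairing above: MINTSTS is the read-only,
 * post-INTMASK interrupt status, while RINTSTS is write-one-to-clear.
 * Each block therefore acknowledges exactly the bits it is about to
 * service before doing the work, so nothing is lost between reads.
 */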
1898 
1899 static void dw_mci_work_routine_card(struct work_struct *work)
1900 {
1901 	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
1902 	int i;
1903 
1904 	for (i = 0; i < host->num_slots; i++) {
1905 		struct dw_mci_slot *slot = host->slot[i];
1906 		struct mmc_host *mmc = slot->mmc;
1907 		struct mmc_request *mrq;
1908 		int present;
1909 
1910 		present = dw_mci_get_cd(mmc);
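		/*
		 * present is re-read at the bottom of the loop, so a card
		 * that bounces while the change is being handled is not
		 * missed.
		 */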
1911 		while (present != slot->last_detect_state) {
1912 			dev_dbg(&slot->mmc->class_dev, "card %s\n",
1913 				present ? "inserted" : "removed");
1914 
1915 			spin_lock_bh(&host->lock);
1916 
1917 			/* Card change detected */
1918 			slot->last_detect_state = present;
1919 
1920 			/* Clean up queue if present */
1921 			mrq = slot->mrq;
1922 			if (mrq) {
1923 				if (mrq == host->mrq) {
1924 					host->data = NULL;
1925 					host->cmd = NULL;
1926 
1927 					switch (host->state) {
1928 					case STATE_IDLE:
1929 						break;
1930 					case STATE_SENDING_CMD:
1931 						mrq->cmd->error = -ENOMEDIUM;
1932 						if (!mrq->data)
1933 							break;
1934 						/* fall through */
1935 					case STATE_SENDING_DATA:
1936 						mrq->data->error = -ENOMEDIUM;
1937 						dw_mci_stop_dma(host);
1938 						break;
1939 					case STATE_DATA_BUSY:
1940 					case STATE_DATA_ERROR:
1941 						if (mrq->data->error == -EINPROGRESS)
1942 							mrq->data->error = -ENOMEDIUM;
1943 						/* fall through */
1944 					case STATE_SENDING_STOP:
1945 						if (mrq->stop)
1946 							mrq->stop->error = -ENOMEDIUM;
1947 						break;
1948 					}
1949 
1950 					dw_mci_request_end(host, mrq);
1951 				} else {
1952 					list_del(&slot->queue_node);
1953 					mrq->cmd->error = -ENOMEDIUM;
1954 					if (mrq->data)
1955 						mrq->data->error = -ENOMEDIUM;
1956 					if (mrq->stop)
1957 						mrq->stop->error = -ENOMEDIUM;
1958 
1959 					spin_unlock(&host->lock);
1960 					mmc_request_done(slot->mmc, mrq);
1961 					spin_lock(&host->lock);
1962 				}
1963 			}
1964 
1965 			/* Power down slot */
1966 			if (present == 0) {
1967 				/* Clear down the FIFO */
1968 				dw_mci_fifo_reset(host);
1969 #ifdef CONFIG_MMC_DW_IDMAC
1970 				dw_mci_idmac_reset(host);
1971 #endif
1972 
1973 			}
1974 
1975 			spin_unlock_bh(&host->lock);
1976 
1977 			present = dw_mci_get_cd(mmc);
1978 		}
1979 
1980 		mmc_detect_change(slot->mmc,
1981 			msecs_to_jiffies(host->pdata->detect_delay_ms));
1982 	}
1983 }
1984 
1985 #ifdef CONFIG_OF
1986 /* Given a slot id, find the device node representing that slot. */
1987 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1988 {
1989 	struct device_node *np;
1990 	const __be32 *addr;
1991 	int len;
1992 
1993 	if (!dev || !dev->of_node)
1994 		return NULL;
1995 
1996 	for_each_child_of_node(dev->of_node, np) {
1997 		addr = of_get_property(np, "reg", &len);
1998 		if (!addr || (len < sizeof(int)))
1999 			continue;
2000 		if (be32_to_cpup(addr) == slot)
2001 			return np;
2002 	}
2003 	return NULL;
2004 }
2005 
2006 static struct dw_mci_of_slot_quirks {
2007 	char *quirk;
2008 	int id;
2009 } of_slot_quirks[] = {
2010 	{
2011 		.quirk	= "disable-wp",
2012 		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2013 	},
2014 };
2015 
2016 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2017 {
2018 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2019 	int quirks = 0;
2020 	int idx;
2021 
2022 	/* get quirks */
2023 	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2024 		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2025 			quirks |= of_slot_quirks[idx].id;
2026 
2027 	return quirks;
2028 }
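
/*
 * A hypothetical slot subnode consumed by the two helpers above: "reg"
 * carries the slot id matched by dw_mci_of_find_slot_node(), and
 * "disable-wp" is currently the only slot quirk recognized:
 *
 *	slot@0 {
 *		reg = <0>;
 *		disable-wp;
 *	};
 */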
2029 #else /* CONFIG_OF */
2030 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2031 {
2032 	return 0;
2033 }
2034 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2035 {
2036 	return NULL;
2037 }
2038 #endif /* CONFIG_OF */
2039 
2040 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2041 {
2042 	struct mmc_host *mmc;
2043 	struct dw_mci_slot *slot;
2044 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2045 	int ctrl_id, ret;
2046 	u32 freq[2];
2047 
2048 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2049 	if (!mmc)
2050 		return -ENOMEM;
2051 
2052 	slot = mmc_priv(mmc);
2053 	slot->id = id;
2054 	slot->mmc = mmc;
2055 	slot->host = host;
2056 	host->slot[id] = slot;
2057 
2058 	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2059 
2060 	mmc->ops = &dw_mci_ops;
2061 	if (of_property_read_u32_array(host->dev->of_node,
2062 				       "clock-freq-min-max", freq, 2)) {
2063 		mmc->f_min = DW_MCI_FREQ_MIN;
2064 		mmc->f_max = DW_MCI_FREQ_MAX;
2065 	} else {
2066 		mmc->f_min = freq[0];
2067 		mmc->f_max = freq[1];
2068 	}
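	/*
	 * A hypothetical DT fragment for the property read above; the two
	 * cells are the minimum and maximum card clock in Hz:
	 *
	 *	clock-freq-min-max = <400000 200000000>;
	 */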
2069 
2070 	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2071 
2072 	if (host->pdata->caps)
2073 		mmc->caps = host->pdata->caps;
2074 
2075 	if (host->pdata->pm_caps)
2076 		mmc->pm_caps = host->pdata->pm_caps;
2077 
2078 	if (host->dev->of_node) {
2079 		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2080 		if (ctrl_id < 0)
2081 			ctrl_id = 0;
2082 	} else {
2083 		ctrl_id = to_platform_device(host->dev)->id;
2084 	}
2085 	if (drv_data && drv_data->caps)
2086 		mmc->caps |= drv_data->caps[ctrl_id];
2087 
2088 	if (host->pdata->caps2)
2089 		mmc->caps2 = host->pdata->caps2;
2090 
2091 	mmc_of_parse(mmc);
2092 
2093 	if (host->pdata->blk_settings) {
2094 		mmc->max_segs = host->pdata->blk_settings->max_segs;
2095 		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2096 		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2097 		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2098 		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2099 	} else {
2100 		/* Useful defaults if platform data is unset. */
2101 #ifdef CONFIG_MMC_DW_IDMAC
2102 		mmc->max_segs = host->ring_size;
2103 		mmc->max_blk_size = 65536;
2104 		mmc->max_blk_count = host->ring_size;
2105 		mmc->max_seg_size = 0x1000;
2106 		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2107 #else
2108 		mmc->max_segs = 64;
2109 		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2110 		mmc->max_blk_count = 512;
2111 		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2112 		mmc->max_seg_size = mmc->max_req_size;
2113 #endif /* CONFIG_MMC_DW_IDMAC */
2114 	}
2115 
2116 	if (dw_mci_get_cd(mmc))
2117 		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2118 	else
2119 		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2120 
2121 	ret = mmc_add_host(mmc);
2122 	if (ret)
2123 		goto err_setup_bus;
2124 
2125 #if defined(CONFIG_DEBUG_FS)
2126 	dw_mci_init_debugfs(slot);
2127 #endif
2128 
2129 	/* Card initially undetected */
2130 	slot->last_detect_state = 0;
2131 
2132 	return 0;
2133 
2134 err_setup_bus:
2135 	mmc_free_host(mmc);
2136 	return ret;
2137 }
2138 
2139 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2140 {
2141 	/* Debugfs stuff is cleaned up by mmc core */
2142 	mmc_remove_host(slot->mmc);
2143 	slot->host->slot[id] = NULL;
2144 	mmc_free_host(slot->mmc);
2145 }
2146 
2147 static void dw_mci_init_dma(struct dw_mci *host)
2148 {
2149 	/* Alloc memory for sg translation */
2150 	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2151 					  &host->sg_dma, GFP_KERNEL);
2152 	if (!host->sg_cpu) {
2153 		dev_err(host->dev, "%s: could not alloc DMA memory\n",
2154 			__func__);
2155 		goto no_dma;
2156 	}
2157 
2158 	/* Determine which DMA interface to use */
2159 #ifdef CONFIG_MMC_DW_IDMAC
2160 	host->dma_ops = &dw_mci_idmac_ops;
2161 	dev_info(host->dev, "Using internal DMA controller.\n");
2162 #endif
2163 
2164 	if (!host->dma_ops)
2165 		goto no_dma;
2166 
2167 	if (host->dma_ops->init && host->dma_ops->start &&
2168 	    host->dma_ops->stop && host->dma_ops->cleanup) {
2169 		if (host->dma_ops->init(host)) {
2170 			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
2171 				__func__);
2172 			goto no_dma;
2173 		}
2174 	} else {
2175 		dev_err(host->dev, "DMA ops are incomplete, not using DMA.\n");
2176 		goto no_dma;
2177 	}
2178 
2179 	host->use_dma = 1;
2180 	return;
2181 
2182 no_dma:
2183 	dev_info(host->dev, "Using PIO mode.\n");
2184 	host->use_dma = 0;
2185 	return;
2186 }
2187 
2188 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2189 {
2190 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
2191 	u32 ctrl;
2192 
2193 	ctrl = mci_readl(host, CTRL);
2194 	ctrl |= reset;
2195 	mci_writel(host, CTRL, ctrl);
2196 
2197 	/* wait till resets clear */
2198 	do {
2199 		ctrl = mci_readl(host, CTRL);
2200 		if (!(ctrl & reset))
2201 			return true;
2202 	} while (time_before(jiffies, timeout));
2203 
2204 	dev_err(host->dev,
2205 		"Timeout resetting block (ctrl reset %#x)\n",
2206 		ctrl & reset);
2207 
2208 	return false;
2209 }
2210 
2211 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2212 {
2213 	/*
2214 	 * Resetting generates a block interrupt, so stop the
2215 	 * scatter-gather miter and clear the pointer beforehand.
2216 	 */
2217 	if (host->sg) {
2218 		sg_miter_stop(&host->sg_miter);
2219 		host->sg = NULL;
2220 	}
2221 
2222 	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2223 }
2224 
2225 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2226 {
2227 	return dw_mci_ctrl_reset(host,
2228 				 SDMMC_CTRL_FIFO_RESET |
2229 				 SDMMC_CTRL_RESET |
2230 				 SDMMC_CTRL_DMA_RESET);
2231 }
2232 
2233 #ifdef CONFIG_OF
2234 static struct dw_mci_of_quirks {
2235 	char *quirk;
2236 	int id;
2237 } of_quirks[] = {
2238 	{
2239 		.quirk	= "broken-cd",
2240 		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2241 	},
2242 };
2243 
2244 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2245 {
2246 	struct dw_mci_board *pdata;
2247 	struct device *dev = host->dev;
2248 	struct device_node *np = dev->of_node;
2249 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2250 	int idx, ret;
2251 	u32 clock_frequency;
2252 
2253 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2254 	if (!pdata) {
2255 		dev_err(dev, "could not allocate memory for pdata\n");
2256 		return ERR_PTR(-ENOMEM);
2257 	}
2258 
2259 	/* find out number of slots supported */
2260 	if (of_property_read_u32(dev->of_node, "num-slots",
2261 				&pdata->num_slots)) {
2262 		dev_info(dev, "num-slots property not found, assuming 1 slot is available\n");
2264 		pdata->num_slots = 1;
2265 	}
2266 
2267 	/* get quirks */
2268 	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2269 		if (of_get_property(np, of_quirks[idx].quirk, NULL))
2270 			pdata->quirks |= of_quirks[idx].id;
2271 
2272 	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2273 		dev_info(dev, "fifo-depth property not found, using value of FIFOTH register as default\n");
2275 
2276 	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2277 
2278 	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2279 		pdata->bus_hz = clock_frequency;
2280 
2281 	if (drv_data && drv_data->parse_dt) {
2282 		ret = drv_data->parse_dt(host);
2283 		if (ret)
2284 			return ERR_PTR(ret);
2285 	}
2286 
2287 	if (of_find_property(np, "supports-highspeed", NULL))
2288 		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2289 
2290 	return pdata;
2291 }
2292 
2293 #else /* CONFIG_OF */
2294 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2295 {
2296 	return ERR_PTR(-EINVAL);
2297 }
2298 #endif /* CONFIG_OF */
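
/*
 * A hypothetical device-tree node exercising the properties parsed by
 * dw_mci_parse_dt() above (node name, unit address and values are purely
 * illustrative):
 *
 *	mshc0: mmc@12200000 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <100000000>;
 *		broken-cd;
 *		supports-highspeed;
 *	};
 */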
2299 
2300 int dw_mci_probe(struct dw_mci *host)
2301 {
2302 	const struct dw_mci_drv_data *drv_data = host->drv_data;
2303 	int width, i, ret = 0;
2304 	u32 fifo_size;
2305 	int init_slots = 0;
2306 
2307 	if (!host->pdata) {
2308 		host->pdata = dw_mci_parse_dt(host);
2309 		if (IS_ERR(host->pdata)) {
2310 			dev_err(host->dev, "platform data not available\n");
2311 			return -EINVAL;
2312 		}
2313 	}
2314 
2315 	if (host->pdata->num_slots > 1) {
2316 		dev_err(host->dev,
2317 			"support for more than one slot is not implemented\n");
2318 		return -ENODEV;
2319 	}
2320 
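	/*
	 * "biu" (bus interface unit) and "ciu" (card interface unit) are
	 * the clock names used by the DT binding; both are optional, but
	 * without a ciu clock the bus rate must come from platform data.
	 */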
2321 	host->biu_clk = devm_clk_get(host->dev, "biu");
2322 	if (IS_ERR(host->biu_clk)) {
2323 		dev_dbg(host->dev, "biu clock not available\n");
2324 	} else {
2325 		ret = clk_prepare_enable(host->biu_clk);
2326 		if (ret) {
2327 			dev_err(host->dev, "failed to enable biu clock\n");
2328 			return ret;
2329 		}
2330 	}
2331 
2332 	host->ciu_clk = devm_clk_get(host->dev, "ciu");
2333 	if (IS_ERR(host->ciu_clk)) {
2334 		dev_dbg(host->dev, "ciu clock not available\n");
2335 		host->bus_hz = host->pdata->bus_hz;
2336 	} else {
2337 		ret = clk_prepare_enable(host->ciu_clk);
2338 		if (ret) {
2339 			dev_err(host->dev, "failed to enable ciu clock\n");
2340 			goto err_clk_biu;
2341 		}
2342 
2343 		if (host->pdata->bus_hz) {
2344 			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2345 			if (ret)
2346 				dev_warn(host->dev,
2347 					 "Unable to set bus rate to %uHz\n",
2348 					 host->pdata->bus_hz);
2349 		}
2350 		host->bus_hz = clk_get_rate(host->ciu_clk);
2351 	}
2352 
2353 	if (!host->bus_hz) {
2354 		dev_err(host->dev,
2355 			"Platform data must supply bus speed\n");
2356 		ret = -ENODEV;
2357 		goto err_clk_ciu;
2358 	}
2359 
2360 	if (drv_data && drv_data->init) {
2361 		ret = drv_data->init(host);
2362 		if (ret) {
2363 			dev_err(host->dev,
2364 				"implementation specific init failed\n");
2365 			goto err_clk_ciu;
2366 		}
2367 	}
2368 
2369 	if (drv_data && drv_data->setup_clock) {
2370 		ret = drv_data->setup_clock(host);
2371 		if (ret) {
2372 			dev_err(host->dev,
2373 				"implementation specific clock setup failed\n");
2374 			goto err_clk_ciu;
2375 		}
2376 	}
2377 
2378 	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2379 	if (IS_ERR(host->vmmc)) {
2380 		ret = PTR_ERR(host->vmmc);
2381 		if (ret == -EPROBE_DEFER)
2382 			goto err_clk_ciu;
2383 
2384 		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2385 		host->vmmc = NULL;
2386 	} else {
2387 		ret = regulator_enable(host->vmmc);
2388 		if (ret) {
2389 			if (ret != -EPROBE_DEFER)
2390 				dev_err(host->dev,
2391 					"regulator_enable fail: %d\n", ret);
2392 			goto err_clk_ciu;
2393 		}
2394 	}
2395 
2396 	host->quirks = host->pdata->quirks;
2397 
2398 	spin_lock_init(&host->lock);
2399 	INIT_LIST_HEAD(&host->queue);
2400 
2401 	/*
2402 	 * Get the host data width from HCON[9:7], assumed programmed
2403 	 * correctly: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit (others reserved).
2404 	 */
2405 	i = (mci_readl(host, HCON) >> 7) & 0x7;
2406 	if (!i) {
2407 		host->push_data = dw_mci_push_data16;
2408 		host->pull_data = dw_mci_pull_data16;
2409 		width = 16;
2410 		host->data_shift = 1;
2411 	} else if (i == 2) {
2412 		host->push_data = dw_mci_push_data64;
2413 		host->pull_data = dw_mci_pull_data64;
2414 		width = 64;
2415 		host->data_shift = 3;
2416 	} else {
2417 		/* Check for a reserved value, and warn if it is */
2418 		WARN((i != 1),
2419 		     "HCON reports a reserved host data width!\n"
2420 		     "Defaulting to 32-bit access.\n");
2421 		host->push_data = dw_mci_push_data32;
2422 		host->pull_data = dw_mci_pull_data32;
2423 		width = 32;
2424 		host->data_shift = 2;
2425 	}
2426 
2427 	/* Reset all blocks */
2428 	if (!dw_mci_ctrl_all_reset(host))
2429 		return -ENODEV;
2430 
2431 	host->dma_ops = host->pdata->dma_ops;
2432 	dw_mci_init_dma(host);
2433 
2434 	/* Clear the interrupts for the host controller */
2435 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2436 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2437 
2438 	/* Put in max timeout */
2439 	mci_writel(host, TMOUT, 0xFFFFFFFF);
2440 
2441 	/*
2442 	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
2443 	 * TX_WMark = fifo_size / 2, DMA multiple transaction size = 8.
2444 	 */
2445 	if (!host->pdata->fifo_depth) {
2446 		/*
2447 		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2448 		 * have been overwritten by the bootloader, just like we're
2449 		 * about to do, so if you know the value for your hardware, you
2450 		 * should put it in the platform data.
2451 		 */
2452 		fifo_size = mci_readl(host, FIFOTH);
2453 		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2454 	} else {
2455 		fifo_size = host->pdata->fifo_depth;
2456 	}
2457 	host->fifo_depth = fifo_size;
2458 	host->fifoth_val =
2459 		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2460 	mci_writel(host, FIFOTH, host->fifoth_val);
2461 
2462 	/* disable clock to CIU */
2463 	mci_writel(host, CLKENA, 0);
2464 	mci_writel(host, CLKSRC, 0);
2465 
2466 	/*
2467 	 * The DATA register offset changed in the 2.40a spec, so check
2468 	 * the version ID and set the data offset accordingly.
2469 	 */
2470 	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2471 	dev_info(host->dev, "Version ID is %04x\n", host->verid);
2472 
2473 	if (host->verid < DW_MMC_240A)
2474 		host->data_offset = DATA_OFFSET;
2475 	else
2476 		host->data_offset = DATA_240A_OFFSET;
2477 
2478 	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2479 	host->card_workqueue = alloc_workqueue("dw-mci-card",
2480 			WQ_MEM_RECLAIM, 1);
2481 	if (!host->card_workqueue) {
2482 		ret = -ENOMEM;
2483 		goto err_dmaunmap;
2484 	}
2485 	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2486 	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2487 			       host->irq_flags, "dw-mci", host);
2488 	if (ret)
2489 		goto err_workqueue;
2490 
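	/*
	 * Without a platform-supplied count, derive the number of slots
	 * from HCON[5:1], which holds the slot count minus one.
	 */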
2491 	if (host->pdata->num_slots)
2492 		host->num_slots = host->pdata->num_slots;
2493 	else
2494 		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2495 
2496 	/*
2497 	 * Enable interrupts for command done, data over, transmit/receive
2498 	 * data requests, card detect, and error conditions (timeouts, CRC).
2499 	 */
2500 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2501 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2502 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2503 		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2504 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2505 
2506 	dev_info(host->dev,
2507 		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
2508 		 host->irq, width, fifo_size);
2510 
2511 	/* We need at least one slot to succeed */
2512 	for (i = 0; i < host->num_slots; i++) {
2513 		ret = dw_mci_init_slot(host, i);
2514 		if (ret)
2515 			dev_dbg(host->dev, "slot %d init failed\n", i);
2516 		else
2517 			init_slots++;
2518 	}
2519 
2520 	if (init_slots) {
2521 		dev_info(host->dev, "%d slots initialized\n", init_slots);
2522 	} else {
2523 		dev_dbg(host->dev, "attempted to initialize %d slots, but failed on all\n",
2524 			host->num_slots);
2525 		goto err_workqueue;
2526 	}
2527 
2528 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2529 		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2530 
2531 	return 0;
2532 
2533 err_workqueue:
2534 	destroy_workqueue(host->card_workqueue);
2535 
2536 err_dmaunmap:
2537 	if (host->use_dma && host->dma_ops->exit)
2538 		host->dma_ops->exit(host);
2539 	if (host->vmmc)
2540 		regulator_disable(host->vmmc);
2541 
2542 err_clk_ciu:
2543 	if (!IS_ERR(host->ciu_clk))
2544 		clk_disable_unprepare(host->ciu_clk);
2545 
2546 err_clk_biu:
2547 	if (!IS_ERR(host->biu_clk))
2548 		clk_disable_unprepare(host->biu_clk);
2549 
2550 	return ret;
2551 }
2552 EXPORT_SYMBOL(dw_mci_probe);
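
/*
 * dw_mci_probe() is called by the bus-glue drivers (such as
 * dw_mmc-pltfm.c), which allocate struct dw_mci and fill in at least
 * regs, irq and dev beforehand. A minimal, hypothetical glue probe
 * (the field names are real struct dw_mci members; the function itself
 * is illustrative only):
 *
 *	static int dw_mci_foo_probe(struct platform_device *pdev)
 *	{
 *		struct dw_mci *host;
 *		struct resource *regs;
 *
 *		host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		host->irq = platform_get_irq(pdev, 0);
 *		if (host->irq < 0)
 *			return host->irq;
 *
 *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		host->regs = devm_ioremap_resource(&pdev->dev, regs);
 *		if (IS_ERR(host->regs))
 *			return PTR_ERR(host->regs);
 *
 *		host->dev = &pdev->dev;
 *		return dw_mci_probe(host);
 *	}
 */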
2553 
2554 void dw_mci_remove(struct dw_mci *host)
2555 {
2556 	int i;
2557 
2558 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2559 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2560 
2561 	for (i = 0; i < host->num_slots; i++) {
2562 		dev_dbg(host->dev, "remove slot %d\n", i);
2563 		if (host->slot[i])
2564 			dw_mci_cleanup_slot(host->slot[i], i);
2565 	}
2566 
2567 	/* disable clock to CIU */
2568 	mci_writel(host, CLKENA, 0);
2569 	mci_writel(host, CLKSRC, 0);
2570 
2571 	destroy_workqueue(host->card_workqueue);
2572 
2573 	if (host->use_dma && host->dma_ops->exit)
2574 		host->dma_ops->exit(host);
2575 
2576 	if (host->vmmc)
2577 		regulator_disable(host->vmmc);
2578 
2579 	if (!IS_ERR(host->ciu_clk))
2580 		clk_disable_unprepare(host->ciu_clk);
2581 
2582 	if (!IS_ERR(host->biu_clk))
2583 		clk_disable_unprepare(host->biu_clk);
2584 }
2585 EXPORT_SYMBOL(dw_mci_remove);
2586 
2589 #ifdef CONFIG_PM_SLEEP
2590 /*
2591  * TODO: we should probably disable the clock to the card in the suspend path.
2592  */
2593 int dw_mci_suspend(struct dw_mci *host)
2594 {
2595 	if (host->vmmc)
2596 		regulator_disable(host->vmmc);
2597 
2598 	return 0;
2599 }
2600 EXPORT_SYMBOL(dw_mci_suspend);
2601 
2602 int dw_mci_resume(struct dw_mci *host)
2603 {
2604 	int i, ret;
2605 
2606 	if (host->vmmc) {
2607 		ret = regulator_enable(host->vmmc);
2608 		if (ret) {
2609 			dev_err(host->dev,
2610 				"failed to enable regulator: %d\n", ret);
2611 			return ret;
2612 		}
2613 	}
2614 
2615 	if (!dw_mci_ctrl_all_reset(host))
2616 		return -ENODEV;
2619 
2620 	if (host->use_dma && host->dma_ops->init)
2621 		host->dma_ops->init(host);
2622 
2623 	/*
2624 	 * Restore the initial value of the FIFOTH register,
2625 	 * and invalidate prev_blksz by resetting it to zero.
2626 	 */
2627 	mci_writel(host, FIFOTH, host->fifoth_val);
2628 	host->prev_blksz = 0;
2629 
2630 	/* Put in max timeout */
2631 	mci_writel(host, TMOUT, 0xFFFFFFFF);
2632 
2633 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
2634 	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2635 		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2636 		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2637 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2638 
2639 	for (i = 0; i < host->num_slots; i++) {
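	/*
	 * Slots flagged MMC_PM_KEEP_POWER stayed powered over suspend;
	 * re-apply their I/O settings and re-program the bus clock.
	 */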
2640 		struct dw_mci_slot *slot = host->slot[i];
2641 		if (!slot)
2642 			continue;
2643 		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2644 			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2645 			dw_mci_setup_bus(slot, true);
2646 		}
2647 	}
2648 	return 0;
2649 }
2650 EXPORT_SYMBOL(dw_mci_resume);
2651 #endif /* CONFIG_PM_SLEEP */
2652 
2653 static int __init dw_mci_init(void)
2654 {
2655 	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
2656 	return 0;
2657 }
2658 
2659 static void __exit dw_mci_exit(void)
2660 {
2661 }
2662 
2663 module_init(dw_mci_init);
2664 module_exit(dw_mci_exit);
2665 
2666 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2667 MODULE_AUTHOR("NXP Semiconductor VietNam");
2668 MODULE_AUTHOR("Imagination Technologies Ltd");
2669 MODULE_LICENSE("GPL v2");
2670