/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: do not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

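/*
 * The host serializes MMC core callbacks with this simple state
 * machine: .request fails new requests with -EAGAIN and .set_ios
 * returns early unless the host is back in STATE_IDLE.
 */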
enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
};

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_data *data;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	bool sd_error;
	long timeout;
	void __iomem *addr;
	struct completion intr_wait;
	enum mmcif_state state;
	spinlock_t lock;
	bool power;
	bool card_present;

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

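/*
 * Read-modify-write helpers for the MMCIF registers. These are not
 * atomic; serialization comes from the request/ios state machine
 * above rather than from a per-register lock.
 */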
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	dev_dbg(&host->pd->dev, "DMA transfer completed\n");

	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_TO_DEVICE);

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

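/*
 * DMA is used only when channels can be obtained for both directions;
 * otherwise the driver stays on PIO. The filter function hands the
 * platform-provided slave configuration to the dmaengine driver via
 * the legacy chan->private mechanism.
 */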
static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
						    &pdata->dma->chan_priv_tx);
		dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
						    &pdata->dma->chan_priv_rx);
		dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		init_completion(&host->dma_complete);
	}
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

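/*
 * Bus clock setup: either pass the peripheral clock straight through
 * (CLK_SUP_PCLK) or program a power-of-two divider. The value written
 * is ilog2() of host->clk / clk rounded down to a power of two; e.g.
 * with host->clk = 100 MHz and a 400 kHz target, the ratio 250 rounds
 * down to 128, so 7 lands in the divider field (bits 16 and up, as
 * masked by CLK_CLEAR). The exact divisor encoding is defined by the
 * hardware; see CLK_CLEAR in <linux/mmc/sh_mmcif.h>.
 */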
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (p->sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
			(ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

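/*
 * Error recovery: if a command sequence is still in flight, break it
 * with CMD_CTRL_BREAK and poll STS1_CMDSEQ in a bounded loop until
 * the controller goes idle, then soft-reset the interface. Otherwise
 * classify HOST_STS2 into CRC, timeout, or end/index errors.
 */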
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout = 10000000;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		while (1) {
			timeout--;
			if (timeout < 0) {
				dev_err(&host->pd->dev,
					"Forced end of command sequence timed out\n");
				return -EIO;
			}
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
								& STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_dbg(&host->pd->dev, "CRC error occurred\n");
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_dbg(&host->pd->dev, "Timeout error occurred\n");
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, "End/Index error occurred\n");
		ret = -EIO;
	}
	return ret;
}

static int sh_mmcif_single_read(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	/* +3 rounds the 32-bit word count up for block sizes that are
	   not a multiple of 4 */
	blocksize = (BLOCK_SIZE_MASK &
			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}

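/*
 * The multi-block PIO paths below advance data->sg across the
 * scatterlist directly and assume every segment length is a multiple
 * of the block size; each block is handshaked with a BUFREN/BUFWEN
 * interrupt before the FIFO is drained or filled word by word.
 */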
static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, j, sec, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);
	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
			/* buf read enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
				host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				*p++ = sh_mmcif_readl(host->addr,
						      MMCIF_CE_DATA);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}

static int sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	/* buf write enable */
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	/* +3 rounds the 32-bit word count up for block sizes that are
	   not a multiple of 4 */
	blocksize = (BLOCK_SIZE_MASK &
			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}

static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
						struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 i, sec, j, blocksize, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);

	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
			/* buf write enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
				host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				sh_mmcif_writel(host->addr,
						MMCIF_CE_DATA, *p++);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

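/*
 * Build the CE_CMD_SET word: the opcode goes in bits [29:24], and the
 * flags computed below select response type, busy handling, data
 * direction, bus width, multi-block mode with auto CMD12, and the
 * index/CRC checks appropriate for the command.
 */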
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
{
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
	case MMC_GEN_CMD:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (host->data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
					mrq->data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
				struct mmc_request *mrq, u32 opc)
{
	int ret;

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		ret = sh_mmcif_multi_read(host, mrq);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		ret = sh_mmcif_multi_write(host, mrq);
		break;
	case MMC_WRITE_BLOCK:
		ret = sh_mmcif_single_write(host, mrq);
		break;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		ret = sh_mmcif_single_read(host, mrq);
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported command: d'%08d\n", opc);
		ret = -EINVAL;
		break;
	}
	return ret;
}

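/*
 * Issue one command: program block size (and count for multi-block),
 * arm the completion/error interrupt mask, write the argument and the
 * CE_CMD_SET word, then sleep on intr_wait until the IRQ handler
 * completes it. The data phase is then moved by PIO or, if a DMA
 * descriptor was set up, by waiting for the DMA completion.
 */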
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			struct mmc_request *mrq, struct mmc_command *cmd)
{
	long time;
	int ret = 0, mask = 0;
	u32 opc = cmd->opcode;

	switch (opc) {
	/* response busy check */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
	case MMC_GEN_CMD:
		mask = MASK_MRBSYE;
		break;
	default:
		mask = MASK_MCRSPE;
		break;
	}
	mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;

	if (host->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
		host->timeout);
	if (time <= 0) {
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}
	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
					cmd->opcode);
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		host->sd_error = false;
		return;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return;
	}
	sh_mmcif_get_response(host, cmd);
	if (host->data) {
		if (!host->dma_active) {
			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
		} else {
			time = wait_for_completion_interruptible_timeout(
					&host->dma_complete, host->timeout);
			if (!time)
				ret = -ETIMEDOUT;
			else if (time < 0)
				ret = time;
			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
			host->dma_active = false;
		}
		if (ret < 0)
			mrq->data->bytes_xfered = 0;
		else
			mrq->data->bytes_xfered =
				mrq->data->blocks * mrq->data->blksz;
	}
	cmd->error = ret;
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd)
{
	long time;

	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	else {
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error) {
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}
	sh_mmcif_get_cmd12response(host, cmd);
	cmd->error = 0;
}

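/*
 * .request entry point: guarded by the state machine, it rejects the
 * SD/SDIO-only commands this controller cannot issue, optionally
 * kicks off DMA for the data phase, then runs the command and any
 * stop command synchronously before completing the request.
 */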
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO commands */
	case SD_IO_SEND_OP_COND:
	case MMC_APP_CMD:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if (!mrq->data) {
			/* send_if_cond cmd (not supported) */
			host->state = STATE_IDLE;
			mrq->cmd->error = -ETIMEDOUT;
			mmc_request_done(mmc, mrq);
			return;
		}
		break;
	default:
		break;
	}
	host->data = mrq->data;
	if (mrq->data) {
		if (mrq->data->flags & MMC_DATA_READ) {
			if (host->chan_rx)
				sh_mmcif_start_dma_rx(host);
		} else {
			if (host->chan_tx)
				sh_mmcif_start_dma_tx(host);
		}
	}
	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
	host->data = NULL;

	if (!mrq->cmd->error && mrq->stop)
		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
	host->state = STATE_IDLE;
	mmc_request_done(mmc, mrq);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			pm_runtime_put(&host->pd->dev);
			host->power = false;
			if (p->down_pwr)
				p->down_pwr(host->pd);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			if (p->set_pwr)
				p->set_pwr(host->pd, ios->power_mode);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	if (!p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

static void sh_mmcif_detect(struct mmc_host *mmc)
{
	mmc_detect_change(mmc, 0);
}

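/*
 * Interrupt handler: each invocation acknowledges one completion
 * source (a CE_INT bit is cleared by writing the register back with
 * that bit zeroed, hence the ~mask writes), masks it, and wakes the
 * waiter; any bit from INT_ERR_STS is latched in host->sd_error for
 * the sleeping caller to handle.
 */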
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;
	int err = 0;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

	if (state & INT_RBSYE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_RBSYE | INT_CRSPE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
	} else if (state & INT_CRSPE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
	} else if (state & INT_BUFREN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	} else if (state & INT_BUFWEN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	} else if (state & INT_CMD12DRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
			~(INT_CMD12DRE | INT_CMD12RBE |
			  INT_CMD12CRE | INT_BUFRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	} else if (state & INT_BUFRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	} else if (state & INT_DTRANE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	} else if (state & INT_CMD12RBE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12RBE | INT_CMD12CRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	} else if (state & INT_ERR_STS) {
		/* err interrupts */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	} else {
		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	}
	if (err) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
		complete(&host->intr_wait);
	else
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);

	return IRQ_HANDLED;
}

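/*
 * The controller supplies two interrupt lines (error and normal
 * completion); both are wired to the same handler, which inspects
 * CE_INT to tell the cases apart.
 */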
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "failed to get IRQ resources\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}
	pd = pdev->dev.platform_data;
	if (!pd) {
		dev_err(&pdev->dev, "missing platform data.\n");
		ret = -ENXIO;
		goto clean_up;
	}
	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto clean_up;
	}
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= 1000;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		ret = PTR_ERR(host->hclk);
		goto clean_up1;
	}
	clk_enable(host->hclk);
	host->clk = clk_get_rate(host->hclk);
	host->pd = pdev;

	init_completion(&host->intr_wait);
	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	mmc->f_max = host->clk;
	/* pick f_min close to 400 kHz */
	if (mmc->f_max < 51200000)
		mmc->f_min = mmc->f_max / 128;
	else if (mmc->f_max < 102400000)
		mmc->f_min = mmc->f_max / 256;
	else
		mmc->f_min = mmc->f_max / 512;
	if (pd->ocr)
		mmc->ocr_avail = pd->ocr;
	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	sh_mmcif_sync_reset(host);
	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto clean_up2;

	mmc_add_host(mmc);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto clean_up3;
	}
	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
	if (ret) {
		free_irq(irq[0], host);
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto clean_up3;
	}

	sh_mmcif_detect(host->mmc);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

clean_up3:
	mmc_remove_host(mmc);
	pm_runtime_suspend(&pdev->dev);
clean_up2:
	pm_runtime_disable(&pdev->dev);
	clk_disable(host->hclk);
clean_up1:
	mmc_free_host(mmc);
clean_up:
	if (reg)
		iounmap(reg);
	return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int irq[2];

	pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	free_irq(irq[1], host);

	platform_set_drvdata(pdev, NULL);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

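/*
 * System PM: suspend masks all MMCIF interrupts and gates the
 * interface clock once the MMC core has suspended the host; resume
 * re-enables the clock and hands control back to the core.
 */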
#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int ret = mmc_suspend_host(host->mmc);

	if (!ret) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
		clk_disable(host->hclk);
	}

	return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	clk_enable(host->hclk);

	return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend	NULL
#define sh_mmcif_resume		NULL
#endif	/* CONFIG_PM */

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	.suspend = sh_mmcif_suspend,
	.resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
	},
};

static int __init sh_mmcif_init(void)
{
	return platform_driver_register(&sh_mmcif_driver);
}

static void __exit sh_mmcif_exit(void)
{
	platform_driver_unregister(&sh_mmcif_driver);
}

module_init(sh_mmcif_init);
module_exit(sh_mmcif_exit);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");