/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: do not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20MHz */
#define CLKDEV_INIT		400000   /* 400kHz */

enum mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
};

struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_data *data;
	struct platform_device *pd;
	struct clk *hclk;
	unsigned int clk;
	int bus_width;
	bool sd_error;
	long timeout;
	void __iomem *addr;
	struct completion intr_wait;
	enum mmcif_state state;
	spinlock_t lock;
	bool power;

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

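/*
 * Register access helpers: read-modify-write wrappers that set or clear
 * bits in a 32-bit MMCIF register. All register manipulation below goes
 * through these or through the sh_mmcif_readl()/sh_mmcif_writel()
 * accessors from <linux/mmc/sh_mmcif.h>.
 */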
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

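/*
 * DMA completion callback, invoked by the dmaengine driver (typically in
 * tasklet context). It unmaps the scatterlist and wakes the request
 * thread sleeping on dma_complete in sh_mmcif_start_cmd().
 */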
static void mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	dev_dbg(&host->pd->dev, "Command completed\n");

	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
		 dev_name(&host->pd->dev)))
		return;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->data->sg, host->data->sg_len,
			     DMA_TO_DEVICE);

	complete(&host->dma_complete);
}

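/*
 * Set up a slave-DMA transfer for a read. On any failure (scatterlist
 * mapping or descriptor preparation) both channels are released and the
 * host falls back to PIO for this and all subsequent requests, since
 * DMA is only used when both directions are available.
 */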
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct scatterlist *sg = host->data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, host->data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

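/*
 * Acquire the Tx and Rx dmaengine channels described by the platform
 * data. A board file would wire this up roughly as follows (hypothetical
 * sketch -- the slave IDs and the exact sh_mmcif_dma contents are
 * platform-specific assumptions, not taken from this file):
 *
 *	static struct sh_mmcif_dma mmcif_dma = {
 *		.chan_priv_tx	= { .slave_id = SHDMA_SLAVE_MMCIF_TX },
 *		.chan_priv_rx	= { .slave_id = SHDMA_SLAVE_MMCIF_RX },
 *	};
 *
 *	static struct sh_mmcif_plat_data mmcif_data = {
 *		.dma	= &mmcif_dma,
 *		.ocr	= MMC_VDD_32_33 | MMC_VDD_33_34,
 *		.caps	= MMC_CAP_8_BIT_DATA,
 *	};
 */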
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
				 struct sh_mmcif_plat_data *pdata)
{
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
						    &pdata->dma->chan_priv_tx);
		dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
						    &pdata->dma->chan_priv_rx);
		dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx) {
			dma_release_channel(host->chan_tx);
			host->chan_tx = NULL;
			return;
		}

		init_completion(&host->dma_complete);
	}
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

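/*
 * Program the card clock. The divider field in CE_CLK_CTRL is derived by
 * taking the largest power of two not exceeding host->clk / clk and
 * writing its log2 into the divider bits (bounded by the CLK_CLEAR
 * mask); presumably the hardware then divides the bus clock by
 * 2^(n + 1), per the MMCIF datasheet. When the platform supports it
 * (sup_pclk) and the requested rate equals the bus clock, the clock is
 * passed through undivided instead.
 */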
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (p->sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
			(ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

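/*
 * Central error handler. If a command sequence is still in flight
 * (STS1_CMDSEQ), it is force-terminated via CMD_CTRL_BREAK and, once the
 * controller reports idle, a soft reset is issued. Otherwise the
 * HOST_STS2 flags are decoded into -EIO or -ETIMEDOUT for the MMC core.
 */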
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout = 10000000;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		while (1) {
			timeout--;
			if (timeout < 0) {
				dev_err(&host->pd->dev,
					"Forced end of command sequence timed out\n");
				return -EIO;
			}
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
								& STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_dbg(&host->pd->dev, "CRC error\n");
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_dbg(&host->pd->dev, "Timeout error\n");
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, "End/Index error\n");
		ret = -EIO;
	}
	return ret;
}

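/*
 * PIO transfer helpers. Each helper unmasks the relevant buffer-ready
 * interrupt, sleeps on intr_wait until sh_mmcif_intr() signals it, then
 * moves the block word by word through the CE_DATA window. The
 * multi-block variants walk the scatterlist one segment at a time.
 */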
static int sh_mmcif_single_read(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	blocksize = (BLOCK_SIZE_MASK &
			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}

static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, j, sec, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);
	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
			/* buf read enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
				host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				*p++ = sh_mmcif_readl(host->addr,
						      MMCIF_CE_DATA);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}

static int sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	/* buf write enable */
	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	blocksize = (BLOCK_SIZE_MASK &
			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error)
		return sh_mmcif_error_manage(host);

	return 0;
}

static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
						struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 i, sec, j, blocksize, *p;

	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
						     MMCIF_CE_BLOCK_SET);

	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
			/* buf write enable */
			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
				host->timeout);

			if (time <= 0 || host->sd_error)
				return sh_mmcif_error_manage(host);

			for (i = 0; i < blocksize / 4; i++)
				sh_mmcif_writel(host->addr,
						MMCIF_CE_DATA, *p++);
		}
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

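/*
 * Build the CE_CMD_SET value for a command: the opcode lands in bits
 * 29:24 (hence the opc << 24 below, bounded by CMD_MASK) and the
 * remaining bits are derived from the MMC core's response type, the bus
 * width, the transfer direction and whether an automatic CMD12 is
 * wanted.
 */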
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
{
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
	case MMC_GEN_CMD:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (host->data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
					mrq->data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
				struct mmc_request *mrq, u32 opc)
{
	int ret;

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		ret = sh_mmcif_multi_read(host, mrq);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		ret = sh_mmcif_multi_write(host, mrq);
		break;
	case MMC_WRITE_BLOCK:
		ret = sh_mmcif_single_write(host, mrq);
		break;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		ret = sh_mmcif_single_read(host, mrq);
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc);
		ret = -EINVAL;
		break;
	}
	return ret;
}

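/*
 * Issue a single command: compute the interrupt mask (MRBSYE for
 * R1b-style busy commands, MCRSPE otherwise, plus every error source),
 * clear stale interrupt flags in CE_INT (the magic constant presumably
 * leaves the reserved bits intact), write argument and command, then
 * sleep until the interrupt handler completes intr_wait or the timeout
 * expires.
 */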
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			struct mmc_request *mrq, struct mmc_command *cmd)
{
	long time;
	int ret = 0, mask = 0;
	u32 opc = cmd->opcode;

	switch (opc) {
	/* response busy check */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
	case MMC_GEN_CMD:
		mask = MASK_MRBSYE;
		break;
	default:
		mask = MASK_MCRSPE;
		break;
	}
	mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;

	if (host->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
		host->timeout);
	if (time <= 0) {
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}
	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			dev_dbg(&host->pd->dev, "CMD%d error\n",
					cmd->opcode);
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		host->sd_error = false;
		return;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return;
	}
	sh_mmcif_get_response(host, cmd);
	if (host->data) {
		if (!host->dma_active) {
			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
		} else {
			long time =
				wait_for_completion_interruptible_timeout(&host->dma_complete,
									  host->timeout);
			if (!time)
				ret = -ETIMEDOUT;
			else if (time < 0)
				ret = time;
			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
			host->dma_active = false;
		}
		if (ret < 0)
			mrq->data->bytes_xfered = 0;
		else
			mrq->data->bytes_xfered =
				mrq->data->blocks * mrq->data->blksz;
	}
	cmd->error = ret;
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd)
{
	long time;

	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	else {
		dev_err(&host->pd->dev, "unsupported stop cmd\n");
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}

	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
			host->timeout);
	if (time <= 0 || host->sd_error) {
		cmd->error = sh_mmcif_error_manage(host);
		return;
	}
	sh_mmcif_get_cmd12response(host, cmd);
	cmd->error = 0;
}

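/*
 * .request handler. The state field, protected by host->lock, serves as
 * a one-deep busy flag: a request arriving while another is in progress
 * is bounced with -EAGAIN. SD/SDIO-only opcodes are rejected up front
 * with -ETIMEDOUT so the core's card probing falls through to MMC.
 */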
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO commands */
	case SD_IO_SEND_OP_COND:
	case MMC_APP_CMD:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if (!mrq->data) {
			/* send_if_cond cmd (not supported) */
			host->state = STATE_IDLE;
			mrq->cmd->error = -ETIMEDOUT;
			mmc_request_done(mmc, mrq);
			return;
		}
		break;
	default:
		break;
	}
	host->data = mrq->data;
	if (mrq->data) {
		if (mrq->data->flags & MMC_DATA_READ) {
			if (host->chan_rx)
				sh_mmcif_start_dma_rx(host);
		} else {
			if (host->chan_tx)
				sh_mmcif_start_dma_tx(host);
		}
	}
	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
	host->data = NULL;

	if (!mrq->cmd->error && mrq->stop)
		sh_mmcif_stop_cmd(host, mrq, mrq->stop);
	host->state = STATE_IDLE;
	mmc_request_done(mmc, mrq);
}

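/*
 * .set_ios handler. Power-up grabs the runtime-PM reference and the DMA
 * channels exactly once (tracked by host->power); power-off or a zero
 * clock stops the card clock and releases both again. The bus width is
 * simply cached for the next command setup.
 */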
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (p->set_pwr)
			p->set_pwr(host->pd, ios->power_mode);
		if (!host->power) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
		}
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->power) {
				pm_runtime_put(&host->pd->dev);
				sh_mmcif_release_dma(host);
				host->power = false;
			}
			if (p->down_pwr)
				p->down_pwr(host->pd);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock)
		sh_mmcif_clock_control(host, ios->clock);

	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	if (!p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

static void sh_mmcif_detect(struct mmc_host *mmc)
{
	mmc_detect_change(mmc, 0);
}

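/*
 * Shared handler for both IRQ lines. Each serviced source is
 * acknowledged by writing its bit as 0 to CE_INT (write-zero-to-clear,
 * presumably -- the handler writes the complement of the handled bits)
 * and then masked, leaving it for sh_mmcif_start_cmd() or the PIO
 * helpers to re-enable. Any error source sets host->sd_error before
 * intr_wait is completed.
 */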
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	u32 state;
	int err = 0;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

	if (state & INT_RBSYE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_RBSYE | INT_CRSPE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
	} else if (state & INT_CRSPE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
	} else if (state & INT_BUFREN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
	} else if (state & INT_BUFWEN) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	} else if (state & INT_CMD12DRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
			~(INT_CMD12DRE | INT_CMD12RBE |
			  INT_CMD12CRE | INT_BUFRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
	} else if (state & INT_BUFRE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	} else if (state & INT_DTRANE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	} else if (state & INT_CMD12RBE) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT,
				~(INT_CMD12RBE | INT_CMD12CRE));
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
	} else if (state & INT_ERR_STS) {
		/* err interrupts */
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	} else {
		dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
		err = 1;
	}
	if (err) {
		host->sd_error = true;
		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
		complete(&host->intr_wait);
	else
		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);

	return IRQ_HANDLED;
}

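/*
 * The controller uses two interrupt lines (platform IRQ resources 0 and
 * 1, registered below as "sh_mmc:error" and "sh_mmc:int"); both are
 * routed to the same handler above.
 */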
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}
	pd = pdev->dev.platform_data;
	if (!pd) {
		dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
		ret = -ENXIO;
		goto clean_up;
	}
	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto clean_up;
	}
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= 1000;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		ret = PTR_ERR(host->hclk);
		goto clean_up1;
	}
	clk_enable(host->hclk);
	host->clk = clk_get_rate(host->hclk);
	host->pd = pdev;

	init_completion(&host->intr_wait);
	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	mmc->f_max = host->clk;
	/* close to 400kHz */
	if (mmc->f_max < 51200000)
		mmc->f_min = mmc->f_max / 128;
	else if (mmc->f_max < 102400000)
		mmc->f_min = mmc->f_max / 256;
	else
		mmc->f_min = mmc->f_max / 512;
	if (pd->ocr)
		mmc->ocr_avail = pd->ocr;
	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	sh_mmcif_sync_reset(host);
	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto clean_up2;

	mmc_add_host(mmc);

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto clean_up3;
	}
	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
	if (ret) {
		free_irq(irq[0], host);
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto clean_up3;
	}

	sh_mmcif_detect(host->mmc);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

clean_up3:
	mmc_remove_host(mmc);
	pm_runtime_suspend(&pdev->dev);
clean_up2:
	pm_runtime_disable(&pdev->dev);
	clk_disable(host->hclk);
clean_up1:
	mmc_free_host(mmc);
clean_up:
	if (reg)
		iounmap(reg);
	return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int irq[2];

	pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	free_irq(irq[1], host);

	platform_set_drvdata(pdev, NULL);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	int ret = mmc_suspend_host(host->mmc);

	if (!ret) {
		sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
		clk_disable(host->hclk);
	}

	return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	clk_enable(host->hclk);

	return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend	NULL
#define sh_mmcif_resume		NULL
#endif	/* CONFIG_PM */

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	.suspend = sh_mmcif_suspend,
	.resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
	},
};

static int __init sh_mmcif_init(void)
{
	return platform_driver_register(&sh_mmcif_driver);
}

static void __exit sh_mmcif_exit(void)
{
	platform_driver_unregister(&sh_mmcif_driver);
}

module_init(sh_mmcif_init);
module_exit(sh_mmcif_exit);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");