// SPDX-License-Identifier: GPL-2.0
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by the
 * Linux MMC API.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing, each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. For the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half consists of a "hard" IRQ handler, an IRQ handler thread, a
 * DMA completion callback (if DMA is used), a timeout work item, and request-
 * and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. On an error or on successful completion,
 * the MMC core is informed and the request processing is finished. If
 * processing has to continue, i.e. if data has to be read from or written to
 * the card, or if a stop command has to be sent, the next top half is called,
 * which performs the necessary hardware handling and reschedules the timeout
 * work. This returns the driver state machine to the bottom-half waiting
 * state.
 */
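
/*
 * Illustrative call flow (a sketch assembled from the handlers below, not
 * normative documentation) for a multi-block read with a stop command:
 *
 *   sh_mmcif_request()           top half: STATE_IDLE -> STATE_REQUEST
 *     sh_mmcif_start_cmd()       program the command, arm timeout_work
 *   ...command-complete IRQ, IRQ handler thread runs...
 *   sh_mmcif_irqt()
 *     sh_mmcif_end_cmd()         fetch the response
 *       sh_mmcif_multi_read()    next top half: unmask buffer-read IRQs
 *   ...one buffer-read IRQ per block...
 *     sh_mmcif_mread_block()     PIO-copy one block, timeout is re-armed
 *     sh_mmcif_stop_cmd()        unmask the auto-CMD12 completion IRQ
 *   mmc_request_done()           request finished, back to STATE_IDLE
 */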

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */
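
/*
 * Note that RTYP, RIDXC, CRC7C and DATW above are two-bit fields, so their
 * values select field states and must not be OR-combined with each other.
 * As a worked example (a sketch mirroring sh_mmcif_set_cmd() below, not an
 * extra definition used by the driver), the CE_CMD_SET word for CMD25
 * (MMC_WRITE_MULTIPLE_BLOCK, R1 response) on a 4-bit bus would be:
 *
 *	(MMC_WRITE_MULTIPLE_BLOCK << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT |
 *	CMD_SET_DWEN | CMD_SET_CMLTE | CMD_SET_CMD12EN | CMD_SET_DATW_4
 */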

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)
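
/*
 * For reference (a summary of sh_mmcif_error_manage() below, not additional
 * driver logic): bits in STS2_CRC_ERR are reported to the MMC core as -EIO,
 * bits in STS2_TIMEOUT_ERR as -ETIMEDOUT, and any remaining end/index error
 * as -EIO.
 */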

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

/*
 * differences between SoCs
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;         /* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}
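
/*
 * Usage note (an observation, not a behavioural change): these helpers do a
 * plain, non-atomic read-modify-write, e.g.
 *
 *	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 *
 * ORs MASK_MBUFREN into CE_INT_MASK. Serialization against concurrent
 * register updates comes from the request state machine, not from the
 * helpers themselves.
 */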

static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_chan(dev, "tx");
		if (IS_ERR(host->chan_tx))
			host->chan_tx = NULL;
		host->chan_rx = dma_request_chan(dev, "rx");
		if (IS_ERR(host->chan_rx))
			host->chan_rx = NULL;
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq / (1 << (clkdiv + 1))), clk,
			best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
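
/*
 * Worked example for the non-clkdiv_map branch above (illustrative numbers,
 * not taken from a specific SoC): with current_clk = 104 MHz and a requested
 * clk = 26 MHz, DIV_ROUND_UP(104, 26) - 1 = 3 and fls(3) - 1 = 1, so the
 * CLKDIV field is set to 1, i.e. a divider of 2^(1 + 1) = 4, giving exactly
 * 26 MHz. A target that is not a power-of-two fraction of the parent is
 * rounded down: 20 MHz from 104 MHz yields a divider of 8 and an actual
 * 13 MHz.
 */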

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
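
/*
 * Example of the scatterlist walk above (a sketch with assumed numbers): with
 * a 512-byte block size and a 4096-byte scatterlist entry, sg_blkidx advances
 * by 512 per block and wraps to 0 after 8 blocks, at which point sg_idx moves
 * on and pio_ptr is reset to the start of the next entry; the function
 * returns false once all sg_len entries have been consumed.
 */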

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R1B:
		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}

	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
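
/*
 * A second worked example (mirroring the logic above, not extra driver code):
 * for CMD2 (MMC_ALL_SEND_CID, R2 response, no data) the function returns
 *
 *	(MMC_ALL_SEND_CID << 24) | CMD_SET_RTYP_17B | CMD_SET_RIDXC_BITS |
 *	CMD_SET_CRC7C_INTERNAL
 *
 * i.e. a 136-bit response with "check bits" index checking and the internal
 * CRC7 check.
 */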

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc;
	u32 mask = 0;
	unsigned long flags;

	if (cmd->flags & MMC_RSP_BUSY)
		mask = MASK_START_CMD | MASK_MRBSYE;
	else
		mask = MASK_START_CMD | MASK_MCRSPE;

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
		host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}
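
/*
 * Worked example for the clkdiv_map branch above (using the value set in this
 * function): clkdiv_map = 0x3ff advertises dividers 2^1..2^10; ffs(0x3ff) = 1
 * and fls(0x3ff) = 10, so f_max is divided by 2 and f_min by 1024.
 */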

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static const struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= mmc_gpio_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	struct device *dev = sh_mmcif_host_to_dev(host);
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from the DMA callback and from an error
	 * IRQ, so it has to be reset here, before setting .dma_active.
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_sync(host->chan_rx);
		else
			dmaengine_terminate_sync(host->chan_tx);
	}

	return false;
}

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	bool wait = false;
	unsigned long flags;
	int wait_work;

	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed - successfully or not.
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			sh_mmcif_dma_complete(host);
	} else {
		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

static void sh_mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct device *dev = &pdev->dev;
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq_optional(pdev, 1);
	if (irq[0] < 0)
		return -ENXIO;

	reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= msecs_to_jiffies(10000);
	host->ccs_enable = true;
	host->clk_ctrl2_enable = false;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
	mmc->max_busy_timeout = 10000;

	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(dev, "cannot get clock: %d\n", ret);
		goto err_host;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto err_host;

	sh_mmcif_clk_setup(host);

	pm_runtime_enable(dev);
	host->power = false;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(dev, 100);

	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->clk) / 1000000UL);

	pm_runtime_put(dev);
	clk_disable_unprepare(host->clk);
	return ret;

err_clk:
	clk_disable_unprepare(host->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->clk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
	pm_runtime_put(dev);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &sh_mmcif_dev_pm_ops,
		.of_match_table = sh_mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");