xref: /openbmc/linux/drivers/mmc/host/sh_mmcif.c (revision 7587eb18)
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by
 * the Linux MMC API.
 *
 * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing, each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to handle completion
 * timeouts, and returns. In case of the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
 * thread, a DMA completion callback, if DMA is used, a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. In case of an error or a successful
 * processing completion, the MMC core is informed and the request processing
 * is finished. In case processing has to continue, i.e. if data has to be read
 * from or written to the card, or if a stop command has to be sent, the next
 * top half is called, which performs the necessary hardware handling and
 * reschedules the timeout work. This returns the driver state machine to the
 * bottom-half waiting state.
 */
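
/*
 * As an illustration, a PIO multi-block read that completes without errors
 * flows through the state machine roughly as follows (all names defined
 * below):
 *
 *   sh_mmcif_request() -> sh_mmcif_start_cmd()        (top half, returns)
 *   IRQ: sh_mmcif_intr() -> sh_mmcif_irqt()
 *     -> sh_mmcif_end_cmd() -> sh_mmcif_multi_read()  (next top half)
 *   IRQ per block: sh_mmcif_irqt() -> sh_mmcif_mread_block()
 *   after the last block: sh_mmcif_stop_cmd()         (CMD12 top half)
 *   IRQ: sh_mmcif_irqt() -> mmc_request_done()
 */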

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"
#define DRIVER_VERSION	"2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)
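
/*
 * Note that each MASK_M* bit sits at the same position as the corresponding
 * INT_* status bit, which is why INT_ERR_STS can be mixed directly into
 * MASK_CLEAN above.
 */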

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

/*
 * difference for each SoC
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;         /* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}

static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}
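
/*
 * MMCIF_CE_DATA is the controller's 32-bit data port, hence the fixed 4-byte
 * DMA bus width above; the PIO paths below likewise access it in u32 units.
 */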

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	host->dma_active = false;

	/* We can only use DMA for both Tx and Rx, or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_slave_channel(dev, "tx");
		host->chan_rx = dma_request_slave_channel(dev, "rx");
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq / (1 << (clkdiv + 1))), clk,
			best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
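
/*
 * Divider example for the final else branch above: with a 104 MHz parent
 * clock and a requested 25 MHz, DIV_ROUND_UP(104, 25) - 1 = 4 and
 * fls(4) - 1 = 2, so CLKDIV field value 2 selects a divider of
 * 1 << (2 + 1) = 8, i.e. 13 MHz, the fastest rate not exceeding the request.
 */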

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}
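
/*
 * Example: with a 512-byte block size and a single 4096-byte scatterlist
 * entry, sg_blkidx advances by 512 per block and wraps to 0 after the
 * eighth block, at which point sg_idx moves on to the next entry, if any.
 */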

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}
	switch (opc) {
	/* RBSY */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		tmp |= CMD_SET_RBSY;
		break;
	}
	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}
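
/*
 * Example command word: CMD18 (MMC_READ_MULTIPLE_BLOCK) with an R1 response
 * on an 8-bit bus (SDR timing) is assembled above as
 *   (18 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DATW_8 |
 *   CMD_SET_CMLTE | CMD_SET_CMD12EN
 * i.e. a multi-block read with automatic CMD12 issue.
 */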

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 mask;
	unsigned long flags;

	switch (opc) {
	/* response busy check */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		mask = MASK_START_CMD | MASK_MRBSYE;
		break;
	default:
		mask = MASK_START_CMD | MASK_MCRSPE;
		break;
	}

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (mrq->cmd->opcode) {
	/* MMCIF does not support SD/SDIO commands */
	case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
		if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
			break;
		/* fall through */
	case MMC_APP_CMD:
	case SD_IO_RW_DIRECT:
		host->state = STATE_IDLE;
		mrq->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, mrq);
		return;
	default:
		break;
	}

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
		host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}
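
/*
 * With clkdiv_map = 0x3ff above, ffs() = 1 and fls() = 10, so f_max becomes
 * the rounded rate / 2 and f_min the rounded minimum rate / 1024, matching
 * divider fields 0..9, i.e. hardware dividers of 2 through 1024.
 */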

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	int ret = mmc_gpio_get_cd(mmc);

	if (ret >= 0)
		return ret;

	if (!p || !p->get_cd)
		return -ENOSYS;
	else
		return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= sh_mmcif_get_cd,
};

static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	struct device *dev = sh_mmcif_host_to_dev(host);
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from the DMA callback and from the error
	 * IRQ, so it has to be reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
	}

	return false;
}

static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	bool wait = false;
	unsigned long flags;
	int wait_work;

	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true if processing continues, and false if the
	 * request has to be completed - successfully or not
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data? */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end? */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end? */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			sh_mmcif_dma_complete(host);
	} else {
		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

static void sh_mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct device *dev = &pdev->dev;
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(dev, "Get irq error\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= msecs_to_jiffies(10000);
	host->ccs_enable = !pd || !pd->ccs_unsupported;
	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(dev, "cannot get clock: %d\n", ret);
		goto err_host;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto err_host;

	sh_mmcif_clk_setup(host);

	pm_runtime_enable(dev);
	host->power = false;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
		if (ret < 0)
			goto err_clk;
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(dev, 100);

	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->clk) / 1000000UL);

	pm_runtime_put(dev);
	clk_disable_unprepare(host->clk);
	return ret;

err_clk:
	clk_disable_unprepare(host->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->clk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
	pm_runtime_put(dev);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &sh_mmcif_dev_pm_ops,
		.of_match_table = sh_mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");