1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
4  * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5  */
6 
7 #include <linux/clk.h>
8 #include <linux/delay.h>
9 #include <linux/device.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmaengine.h>
12 #include <linux/highmem.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/log2.h>
16 #include <linux/mmc/host.h>
17 #include <linux/mmc/mmc.h>
18 #include <linux/mmc/sd.h>
19 #include <linux/mmc/sdio.h>
20 #include <linux/module.h>
21 #include <linux/pagemap.h>
22 #include <linux/pinctrl/consumer.h>
23 #include <linux/platform_device.h>
24 #include <linux/scatterlist.h>
25 #include <linux/string.h>
26 #include <linux/time.h>
27 #include <linux/virtio.h>
28 #include <linux/workqueue.h>
29 
30 #define USDHI6_SD_CMD		0x0000
31 #define USDHI6_SD_PORT_SEL	0x0004
32 #define USDHI6_SD_ARG		0x0008
33 #define USDHI6_SD_STOP		0x0010
34 #define USDHI6_SD_SECCNT	0x0014
35 #define USDHI6_SD_RSP10		0x0018
36 #define USDHI6_SD_RSP32		0x0020
37 #define USDHI6_SD_RSP54		0x0028
38 #define USDHI6_SD_RSP76		0x0030
39 #define USDHI6_SD_INFO1		0x0038
40 #define USDHI6_SD_INFO2		0x003c
41 #define USDHI6_SD_INFO1_MASK	0x0040
42 #define USDHI6_SD_INFO2_MASK	0x0044
43 #define USDHI6_SD_CLK_CTRL	0x0048
44 #define USDHI6_SD_SIZE		0x004c
45 #define USDHI6_SD_OPTION	0x0050
46 #define USDHI6_SD_ERR_STS1	0x0058
47 #define USDHI6_SD_ERR_STS2	0x005c
48 #define USDHI6_SD_BUF0		0x0060
49 #define USDHI6_SDIO_MODE	0x0068
50 #define USDHI6_SDIO_INFO1	0x006c
51 #define USDHI6_SDIO_INFO1_MASK	0x0070
52 #define USDHI6_CC_EXT_MODE	0x01b0
53 #define USDHI6_SOFT_RST		0x01c0
54 #define USDHI6_VERSION		0x01c4
55 #define USDHI6_HOST_MODE	0x01c8
56 #define USDHI6_SDIF_MODE	0x01cc
57 
58 #define USDHI6_SD_CMD_APP		0x0040
59 #define USDHI6_SD_CMD_MODE_RSP_AUTO	0x0000
60 #define USDHI6_SD_CMD_MODE_RSP_NONE	0x0300
61 #define USDHI6_SD_CMD_MODE_RSP_R1	0x0400	/* Also R5, R6, R7 */
62 #define USDHI6_SD_CMD_MODE_RSP_R1B	0x0500	/* R1b */
63 #define USDHI6_SD_CMD_MODE_RSP_R2	0x0600
64 #define USDHI6_SD_CMD_MODE_RSP_R3	0x0700	/* Also R4 */
65 #define USDHI6_SD_CMD_DATA		0x0800
66 #define USDHI6_SD_CMD_READ		0x1000
67 #define USDHI6_SD_CMD_MULTI		0x2000
68 #define USDHI6_SD_CMD_CMD12_AUTO_OFF	0x4000
69 
70 #define USDHI6_CC_EXT_MODE_SDRW		BIT(1)
71 
72 #define USDHI6_SD_INFO1_RSP_END		BIT(0)
73 #define USDHI6_SD_INFO1_ACCESS_END	BIT(2)
74 #define USDHI6_SD_INFO1_CARD_OUT	BIT(3)
75 #define USDHI6_SD_INFO1_CARD_IN		BIT(4)
76 #define USDHI6_SD_INFO1_CD		BIT(5)
77 #define USDHI6_SD_INFO1_WP		BIT(7)
78 #define USDHI6_SD_INFO1_D3_CARD_OUT	BIT(8)
79 #define USDHI6_SD_INFO1_D3_CARD_IN	BIT(9)
80 
81 #define USDHI6_SD_INFO2_CMD_ERR		BIT(0)
82 #define USDHI6_SD_INFO2_CRC_ERR		BIT(1)
83 #define USDHI6_SD_INFO2_END_ERR		BIT(2)
84 #define USDHI6_SD_INFO2_TOUT		BIT(3)
85 #define USDHI6_SD_INFO2_IWA_ERR		BIT(4)
86 #define USDHI6_SD_INFO2_IRA_ERR		BIT(5)
87 #define USDHI6_SD_INFO2_RSP_TOUT	BIT(6)
88 #define USDHI6_SD_INFO2_SDDAT0		BIT(7)
89 #define USDHI6_SD_INFO2_BRE		BIT(8)
90 #define USDHI6_SD_INFO2_BWE		BIT(9)
91 #define USDHI6_SD_INFO2_SCLKDIVEN	BIT(13)
92 #define USDHI6_SD_INFO2_CBSY		BIT(14)
93 #define USDHI6_SD_INFO2_ILA		BIT(15)
94 
95 #define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
96 #define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
97 #define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
98 #define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)
99 
100 #define USDHI6_SD_INFO2_ERR	(USDHI6_SD_INFO2_CMD_ERR |	\
101 	USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR |	\
102 	USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR |	\
103 	USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT |	\
104 	USDHI6_SD_INFO2_ILA)
105 
106 #define USDHI6_SD_INFO1_IRQ	(USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
107 				 USDHI6_SD_INFO1_CARD)
108 
109 #define USDHI6_SD_INFO2_IRQ	(USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
110 				 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)
111 
112 #define USDHI6_SD_CLK_CTRL_SCLKEN	BIT(8)
113 
114 #define USDHI6_SD_STOP_STP		BIT(0)
115 #define USDHI6_SD_STOP_SEC		BIT(8)
116 
117 #define USDHI6_SDIO_INFO1_IOIRQ		BIT(0)
118 #define USDHI6_SDIO_INFO1_EXPUB52	BIT(14)
119 #define USDHI6_SDIO_INFO1_EXWT		BIT(15)
120 
121 #define USDHI6_SD_ERR_STS1_CRC_NO_ERROR	BIT(13)
122 
123 #define USDHI6_SOFT_RST_RESERVED	(BIT(1) | BIT(2))
124 #define USDHI6_SOFT_RST_RESET		BIT(0)
125 
126 #define USDHI6_SD_OPTION_TIMEOUT_SHIFT	4
127 #define USDHI6_SD_OPTION_TIMEOUT_MASK	(0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
128 #define USDHI6_SD_OPTION_WIDTH_1	BIT(15)
129 
130 #define USDHI6_SD_PORT_SEL_PORTS_SHIFT	8
131 
132 #define USDHI6_SD_CLK_CTRL_DIV_MASK	0xff
133 
134 #define USDHI6_SDIO_INFO1_IRQ	(USDHI6_SDIO_INFO1_IOIRQ | 3 | \
135 				 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)
136 
137 #define USDHI6_MIN_DMA 64
138 
139 enum usdhi6_wait_for {
140 	USDHI6_WAIT_FOR_REQUEST,
141 	USDHI6_WAIT_FOR_CMD,
142 	USDHI6_WAIT_FOR_MREAD,
143 	USDHI6_WAIT_FOR_MWRITE,
144 	USDHI6_WAIT_FOR_READ,
145 	USDHI6_WAIT_FOR_WRITE,
146 	USDHI6_WAIT_FOR_DATA_END,
147 	USDHI6_WAIT_FOR_STOP,
148 	USDHI6_WAIT_FOR_DMA,
149 };
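
/*
 * Editor's note, a rough sketch of how these states are used by the
 * threaded IRQ handler usdhi6_sd_bh() below: every request starts in
 * WAIT_FOR_REQUEST and moves to WAIT_FOR_CMD when the command is kicked.
 * Single-block PIO then goes through WAIT_FOR_READ / WAIT_FOR_WRITE and
 * WAIT_FOR_DATA_END; multi-block PIO loops in WAIT_FOR_MREAD /
 * WAIT_FOR_MWRITE until the last block; the DMA path parks in
 * WAIT_FOR_DMA (or WAIT_FOR_STOP when a stop command is expected) until
 * completion. Errors and timeouts return the host to WAIT_FOR_REQUEST.
 */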
150 
151 struct usdhi6_page {
152 	struct page *page;
153 	void *mapped;		/* mapped page */
154 };
155 
156 struct usdhi6_host {
157 	struct mmc_host *mmc;
158 	struct mmc_request *mrq;
159 	void __iomem *base;
160 	struct clk *clk;
161 
162 	/* SG memory handling */
163 
164 	/* Common for multiple and single block requests */
165 	struct usdhi6_page pg;	/* current page from an SG */
166 	void *blk_page;		/* either a mapped page, or the bounce buffer */
167 	size_t offset;		/* offset within a page, including sg->offset */
168 
	/* Blocks crossing a page boundary */
170 	size_t head_len;
171 	struct usdhi6_page head_pg;
172 
	/* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
174 	struct scatterlist bounce_sg;
175 	u8 bounce_buf[512];
176 
177 	/* Multiple block requests only */
178 	struct scatterlist *sg;	/* current SG segment */
179 	int page_idx;		/* page index within an SG segment */
180 
181 	enum usdhi6_wait_for wait;
182 	u32 status_mask;
183 	u32 status2_mask;
184 	u32 sdio_mask;
185 	u32 io_error;
186 	u32 irq_status;
187 	unsigned long imclk;
188 	unsigned long rate;
189 	bool app_cmd;
190 
191 	/* Timeout handling */
192 	struct delayed_work timeout_work;
193 	unsigned long timeout;
194 
195 	/* DMA support */
196 	struct dma_chan *chan_rx;
197 	struct dma_chan *chan_tx;
198 	bool dma_active;
199 
200 	/* Pin control */
201 	struct pinctrl *pinctrl;
202 	struct pinctrl_state *pins_default;
203 	struct pinctrl_state *pins_uhs;
204 };
205 
206 /*			I/O primitives					*/
207 
208 static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
209 {
210 	iowrite32(data, host->base + reg);
211 	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
212 		host->base, reg, data);
213 }
214 
215 static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
216 {
217 	iowrite16(data, host->base + reg);
218 	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
219 		host->base, reg, data);
220 }
221 
222 static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
223 {
224 	u32 data = ioread32(host->base + reg);
225 	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
226 		host->base, reg, data);
227 	return data;
228 }
229 
230 static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
231 {
232 	u16 data = ioread16(host->base + reg);
233 	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
234 		host->base, reg, data);
235 	return data;
236 }
237 
238 static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
239 {
240 	host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
241 	host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
242 	usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
243 	usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
244 }
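
/*
 * Editor's note: in the INFO1/INFO2 mask registers a *set* bit disables
 * the corresponding interrupt source, hence the inversions above. E.g.
 * usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END, 0) writes
 * USDHI6_SD_INFO1_IRQ & ~USDHI6_SD_INFO1_RSP_END to USDHI6_SD_INFO1_MASK,
 * leaving only the response-end event unmasked out of the INFO1 set.
 */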
245 
246 static void usdhi6_wait_for_resp(struct usdhi6_host *host)
247 {
248 	usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
249 			  USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
250 			  USDHI6_SD_INFO2_ERR);
251 }
252 
253 static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
254 {
255 	usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
256 			  USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
257 			  (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
258 }
259 
260 static void usdhi6_only_cd(struct usdhi6_host *host)
261 {
262 	/* Mask all except card hotplug */
263 	usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
264 }
265 
266 static void usdhi6_mask_all(struct usdhi6_host *host)
267 {
268 	usdhi6_irq_enable(host, 0, 0);
269 }
270 
271 static int usdhi6_error_code(struct usdhi6_host *host)
272 {
273 	u32 err;
274 
275 	usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);
276 
277 	if (host->io_error &
278 	    (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
279 		u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
280 		int opc = host->mrq ? host->mrq->cmd->opcode : -1;
281 
282 		err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
283 		/* Response timeout is often normal, don't spam the log */
284 		if (host->wait == USDHI6_WAIT_FOR_CMD)
285 			dev_dbg(mmc_dev(host->mmc),
286 				"T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
287 				err, rsp54, host->wait, opc);
288 		else
289 			dev_warn(mmc_dev(host->mmc),
290 				 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
291 				 err, rsp54, host->wait, opc);
292 		return -ETIMEDOUT;
293 	}
294 
295 	err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
296 	if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
297 		dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
298 			 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
299 	if (host->io_error & USDHI6_SD_INFO2_ILA)
300 		return -EILSEQ;
301 
302 	return -EIO;
303 }
304 
305 /*			Scatter-Gather management			*/
306 
307 /*
308  * In PIO mode we have to map each page separately, using kmap(). That way
309  * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
 * have to use a bounce buffer for blocks that cross page boundaries. Such blocks
311  * have been observed with an SDIO WiFi card (b43 driver).
312  */
313 static void usdhi6_blk_bounce(struct usdhi6_host *host,
314 			      struct scatterlist *sg)
315 {
316 	struct mmc_data *data = host->mrq->data;
317 	size_t blk_head = host->head_len;
318 
319 	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
320 		__func__, host->mrq->cmd->opcode, data->sg_len,
321 		data->blksz, data->blocks, sg->offset);
322 
323 	host->head_pg.page	= host->pg.page;
324 	host->head_pg.mapped	= host->pg.mapped;
325 	host->pg.page		= nth_page(host->pg.page, 1);
326 	host->pg.mapped		= kmap(host->pg.page);
327 
328 	host->blk_page = host->bounce_buf;
329 	host->offset = 0;
330 
331 	if (data->flags & MMC_DATA_READ)
332 		return;
333 
334 	memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
335 	       blk_head);
336 	memcpy(host->bounce_buf + blk_head, host->pg.mapped,
337 	       data->blksz - blk_head);
338 }
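
/*
 * Editor's note, a worked example with hypothetical numbers (PAGE_SIZE =
 * 4096, blksz = 512, sg->offset = 3712): head = 4096 - 3712 = 384, so
 * blk_head = 384 and the first block consists of 384 bytes at the end of
 * page 0 plus 128 bytes at the start of page 1. For a write the two
 * fragments are assembled into bounce_buf above; for a read
 * usdhi6_sg_unmap() later scatters bounce_buf back across both pages.
 */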
339 
340 /* Only called for multiple block IO */
341 static void usdhi6_sg_prep(struct usdhi6_host *host)
342 {
343 	struct mmc_request *mrq = host->mrq;
344 	struct mmc_data *data = mrq->data;
345 
346 	usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);
347 
348 	host->sg = data->sg;
349 	/* TODO: if we always map, this is redundant */
350 	host->offset = host->sg->offset;
351 }
352 
353 /* Map the first page in an SG segment: common for multiple and single block IO */
354 static void *usdhi6_sg_map(struct usdhi6_host *host)
355 {
356 	struct mmc_data *data = host->mrq->data;
357 	struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
358 	size_t head = PAGE_SIZE - sg->offset;
359 	size_t blk_head = head % data->blksz;
360 
361 	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
362 	if (WARN(sg_dma_len(sg) % data->blksz,
363 		 "SG size %u isn't a multiple of block size %u\n",
364 		 sg_dma_len(sg), data->blksz))
365 		return NULL;
366 
367 	host->pg.page = sg_page(sg);
368 	host->pg.mapped = kmap(host->pg.page);
369 	host->offset = sg->offset;
370 
371 	/*
372 	 * Block size must be a power of 2 for multi-block transfers,
373 	 * therefore blk_head is equal for all pages in this SG
374 	 */
375 	host->head_len = blk_head;
376 
377 	if (head < data->blksz)
378 		/*
379 		 * The first block in the SG crosses a page boundary.
380 		 * Max blksz = 512, so blocks can only span 2 pages
381 		 */
382 		usdhi6_blk_bounce(host, sg);
383 	else
384 		host->blk_page = host->pg.mapped;
385 
386 	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
387 		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
388 		sg->offset, host->mrq->cmd->opcode, host->mrq);
389 
390 	return host->blk_page + host->offset;
391 }
392 
393 /* Unmap the current page: common for multiple and single block IO */
394 static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
395 {
396 	struct mmc_data *data = host->mrq->data;
397 	struct page *page = host->head_pg.page;
398 
399 	if (page) {
		/* The previous block crossed a page boundary */
401 		struct scatterlist *sg = data->sg_len > 1 ?
402 			host->sg : data->sg;
403 		size_t blk_head = host->head_len;
404 
405 		if (!data->error && data->flags & MMC_DATA_READ) {
406 			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
407 			       host->bounce_buf, blk_head);
408 			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
409 			       data->blksz - blk_head);
410 		}
411 
412 		flush_dcache_page(page);
413 		kunmap(page);
414 
415 		host->head_pg.page = NULL;
416 
417 		if (!force && sg_dma_len(sg) + sg->offset >
418 		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
419 			/* More blocks in this SG, don't unmap the next page */
420 			return;
421 	}
422 
423 	page = host->pg.page;
424 	if (!page)
425 		return;
426 
427 	flush_dcache_page(page);
428 	kunmap(page);
429 
430 	host->pg.page = NULL;
431 }
432 
433 /* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
434 static void usdhi6_sg_advance(struct usdhi6_host *host)
435 {
436 	struct mmc_data *data = host->mrq->data;
437 	size_t done, total;
438 
439 	/* New offset: set at the end of the previous block */
440 	if (host->head_pg.page) {
441 		/* Finished a cross-page block, jump to the new page */
442 		host->page_idx++;
443 		host->offset = data->blksz - host->head_len;
444 		host->blk_page = host->pg.mapped;
445 		usdhi6_sg_unmap(host, false);
446 	} else {
447 		host->offset += data->blksz;
448 		/* The completed block didn't cross a page boundary */
449 		if (host->offset == PAGE_SIZE) {
450 			/* If required, we'll map the page below */
451 			host->offset = 0;
452 			host->page_idx++;
453 		}
454 	}
455 
456 	/*
	 * Now host->blk_page + host->offset points at the end of our last
	 * block, and host->page_idx is the index of the page in which our
	 * new block is located, if any
460 	 */
461 
462 	done = (host->page_idx << PAGE_SHIFT) + host->offset;
463 	total = host->sg->offset + sg_dma_len(host->sg);
464 
465 	dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
466 		done, total, host->offset);
467 
468 	if (done < total && host->offset) {
469 		/* More blocks in this page */
470 		if (host->offset + data->blksz > PAGE_SIZE)
			/* We have reached a block that spans 2 pages */
472 			usdhi6_blk_bounce(host, host->sg);
473 
474 		return;
475 	}
476 
477 	/* Finished current page or an SG segment */
478 	usdhi6_sg_unmap(host, false);
479 
480 	if (done == total) {
481 		/*
482 		 * End of an SG segment or the complete SG: jump to the next
483 		 * segment, we'll map it later in usdhi6_blk_read() or
484 		 * usdhi6_blk_write()
485 		 */
486 		struct scatterlist *next = sg_next(host->sg);
487 
488 		host->page_idx = 0;
489 
490 		if (!next)
491 			host->wait = USDHI6_WAIT_FOR_DATA_END;
492 		host->sg = next;
493 
494 		if (WARN(next && sg_dma_len(next) % data->blksz,
495 			 "SG size %u isn't a multiple of block size %u\n",
496 			 sg_dma_len(next), data->blksz))
497 			data->error = -EINVAL;
498 
499 		return;
500 	}
501 
	/* We cannot get here after crossing a page boundary */
503 
504 	/* Next page in the same SG */
505 	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
506 	host->pg.mapped = kmap(host->pg.page);
507 	host->blk_page = host->pg.mapped;
508 
509 	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
510 		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
511 		host->mrq->cmd->opcode, host->mrq);
512 }
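
/*
 * Editor's note, an illustration with hypothetical numbers: with blksz =
 * 512, sg->offset = 0 and 4096-byte pages, eight completed blocks advance
 * host->offset 512, 1024, ..., 4096; the eighth wraps offset to 0 and
 * bumps page_idx, so done = (page_idx << PAGE_SHIFT) + offset always
 * counts the bytes consumed from the start of the segment's first page
 * (including sg->offset) and is compared against sg->offset +
 * sg_dma_len(sg) to detect the end of the segment.
 */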
513 
514 /*			DMA handling					*/
515 
516 static void usdhi6_dma_release(struct usdhi6_host *host)
517 {
518 	host->dma_active = false;
519 	if (host->chan_tx) {
520 		struct dma_chan *chan = host->chan_tx;
521 		host->chan_tx = NULL;
522 		dma_release_channel(chan);
523 	}
524 	if (host->chan_rx) {
525 		struct dma_chan *chan = host->chan_rx;
526 		host->chan_rx = NULL;
527 		dma_release_channel(chan);
528 	}
529 }
530 
531 static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
532 {
533 	struct mmc_data *data = host->mrq->data;
534 
535 	if (!host->dma_active)
536 		return;
537 
538 	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
539 	host->dma_active = false;
540 
541 	if (data->flags & MMC_DATA_READ)
542 		dma_unmap_sg(host->chan_rx->device->dev, data->sg,
543 			     data->sg_len, DMA_FROM_DEVICE);
544 	else
545 		dma_unmap_sg(host->chan_tx->device->dev, data->sg,
546 			     data->sg_len, DMA_TO_DEVICE);
547 }
548 
549 static void usdhi6_dma_complete(void *arg)
550 {
551 	struct usdhi6_host *host = arg;
552 	struct mmc_request *mrq = host->mrq;
553 
554 	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
555 		 dev_name(mmc_dev(host->mmc)), mrq))
556 		return;
557 
558 	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
559 		mrq->cmd->opcode);
560 
561 	usdhi6_dma_stop_unmap(host);
562 	usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
563 }
564 
565 static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
566 			    enum dma_transfer_direction dir)
567 {
568 	struct mmc_data *data = host->mrq->data;
569 	struct scatterlist *sg = data->sg;
570 	struct dma_async_tx_descriptor *desc = NULL;
571 	dma_cookie_t cookie = -EINVAL;
572 	enum dma_data_direction data_dir;
573 	int ret;
574 
575 	switch (dir) {
576 	case DMA_MEM_TO_DEV:
577 		data_dir = DMA_TO_DEVICE;
578 		break;
579 	case DMA_DEV_TO_MEM:
580 		data_dir = DMA_FROM_DEVICE;
581 		break;
582 	default:
583 		return -EINVAL;
584 	}
585 
586 	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
587 	if (ret > 0) {
588 		host->dma_active = true;
589 		desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
590 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
591 	}
592 
593 	if (desc) {
594 		desc->callback = usdhi6_dma_complete;
595 		desc->callback_param = host;
596 		cookie = dmaengine_submit(desc);
597 	}
598 
599 	dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
600 		__func__, data->sg_len, ret, cookie, desc);
601 
602 	if (cookie < 0) {
603 		/* DMA failed, fall back to PIO */
604 		if (ret >= 0)
605 			ret = cookie;
606 		usdhi6_dma_release(host);
607 		dev_warn(mmc_dev(host->mmc),
608 			 "DMA failed: %d, falling back to PIO\n", ret);
609 	}
610 
611 	return cookie;
612 }
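
/*
 * Editor's note: the above follows the usual dmaengine slave sequence:
 * dma_map_sg(), dmaengine_prep_slave_sg(), setting the completion
 * callback, then dmaengine_submit(). The transfer is only started later
 * by dma_async_issue_pending() in usdhi6_dma_kick(), once the command
 * phase has completed (see usdhi6_end_cmd()).
 */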
613 
614 static int usdhi6_dma_start(struct usdhi6_host *host)
615 {
616 	if (!host->chan_rx || !host->chan_tx)
617 		return -ENODEV;
618 
619 	if (host->mrq->data->flags & MMC_DATA_READ)
620 		return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);
621 
622 	return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
623 }
624 
625 static void usdhi6_dma_kill(struct usdhi6_host *host)
626 {
627 	struct mmc_data *data = host->mrq->data;
628 
629 	dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
630 		__func__, data->sg_len, data->blocks, data->blksz);
631 	/* Abort DMA */
632 	if (data->flags & MMC_DATA_READ)
633 		dmaengine_terminate_all(host->chan_rx);
634 	else
635 		dmaengine_terminate_all(host->chan_tx);
636 }
637 
638 static void usdhi6_dma_check_error(struct usdhi6_host *host)
639 {
640 	struct mmc_data *data = host->mrq->data;
641 
642 	dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
643 		__func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));
644 
645 	if (host->io_error) {
646 		data->error = usdhi6_error_code(host);
647 		data->bytes_xfered = 0;
648 		usdhi6_dma_kill(host);
649 		usdhi6_dma_release(host);
650 		dev_warn(mmc_dev(host->mmc),
651 			 "DMA failed: %d, falling back to PIO\n", data->error);
652 		return;
653 	}
654 
655 	/*
656 	 * The datasheet tells us to check a response from the card, whereas
657 	 * responses only come after the command phase, not after the data
658 	 * phase. Let's check anyway.
659 	 */
660 	if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
661 		dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
662 }
663 
664 static void usdhi6_dma_kick(struct usdhi6_host *host)
665 {
666 	if (host->mrq->data->flags & MMC_DATA_READ)
667 		dma_async_issue_pending(host->chan_rx);
668 	else
669 		dma_async_issue_pending(host->chan_tx);
670 }
671 
672 static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
673 {
674 	struct dma_slave_config cfg = {
675 		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
676 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
677 	};
678 	int ret;
679 
680 	host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
681 	dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
682 		host->chan_tx);
683 
684 	if (!host->chan_tx)
685 		return;
686 
687 	cfg.direction = DMA_MEM_TO_DEV;
688 	cfg.dst_addr = start + USDHI6_SD_BUF0;
689 	cfg.dst_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
690 	cfg.src_addr = 0;
691 	ret = dmaengine_slave_config(host->chan_tx, &cfg);
692 	if (ret < 0)
693 		goto e_release_tx;
694 
695 	host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
696 	dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
697 		host->chan_rx);
698 
699 	if (!host->chan_rx)
700 		goto e_release_tx;
701 
702 	cfg.direction = DMA_DEV_TO_MEM;
703 	cfg.src_addr = cfg.dst_addr;
704 	cfg.src_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
705 	cfg.dst_addr = 0;
706 	ret = dmaengine_slave_config(host->chan_rx, &cfg);
707 	if (ret < 0)
708 		goto e_release_rx;
709 
710 	return;
711 
712 e_release_rx:
713 	dma_release_channel(host->chan_rx);
714 	host->chan_rx = NULL;
715 e_release_tx:
716 	dma_release_channel(host->chan_tx);
717 	host->chan_tx = NULL;
718 }
719 
720 /*			API helpers					*/
721 
722 static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
723 {
724 	unsigned long rate = ios->clock;
725 	u32 val;
726 	unsigned int i;
727 
728 	for (i = 1000; i; i--) {
729 		if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
730 			break;
731 		usleep_range(10, 100);
732 	}
733 
734 	if (!i) {
735 		dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
736 		return;
737 	}
738 
739 	val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;
740 
741 	if (rate) {
742 		unsigned long new_rate;
743 
744 		if (host->imclk <= rate) {
745 			if (ios->timing != MMC_TIMING_UHS_DDR50) {
746 				/* Cannot have 1-to-1 clock in DDR mode */
747 				new_rate = host->imclk;
748 				val |= 0xff;
749 			} else {
750 				new_rate = host->imclk / 2;
751 			}
752 		} else {
753 			unsigned long div =
754 				roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
755 			val |= div >> 2;
756 			new_rate = host->imclk / div;
757 		}
758 
759 		if (host->rate == new_rate)
760 			return;
761 
762 		host->rate = new_rate;
763 
764 		dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
765 			rate, (val & 0xff) << 2, new_rate);
766 	}
767 
768 	/*
	 * If the old or the new rate equals the input clock rate, the clock
	 * must be switched off before the change and back on afterwards
771 	 */
772 	if (host->imclk == rate || host->imclk == host->rate || !rate)
773 		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
774 			     val & ~USDHI6_SD_CLK_CTRL_SCLKEN);
775 
776 	if (!rate) {
777 		host->rate = 0;
778 		return;
779 	}
780 
781 	usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
782 
783 	if (host->imclk == rate || host->imclk == host->rate ||
784 	    !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
785 		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
786 			     val | USDHI6_SD_CLK_CTRL_SCLKEN);
787 }
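
/*
 * Editor's note, a worked divider example with hypothetical rates: for
 * imclk = 100 MHz and a requested 25 MHz, div = roundup_pow_of_two(4) =
 * 4, val |= 4 >> 2 = 1 and new_rate = 25 MHz. A request at or above
 * imclk (outside DDR mode) uses the 1:1 encoding 0xff instead. The low
 * byte of SD_CLK_CTRL thus appears to encode the divisor as div / 4
 * (0x00 = /2, 0x01 = /4, 0x02 = /8, 0x04 = /16, ...), one bit per
 * power-of-two divisor.
 */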
788 
789 static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
790 {
791 	struct mmc_host *mmc = host->mmc;
792 
793 	if (!IS_ERR(mmc->supply.vmmc))
794 		/* Errors ignored... */
795 		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
796 				      ios->power_mode ? ios->vdd : 0);
797 }
798 
799 static int usdhi6_reset(struct usdhi6_host *host)
800 {
801 	int i;
802 
803 	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
804 	cpu_relax();
805 	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
806 	for (i = 1000; i; i--)
807 		if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
808 			break;
809 
810 	return i ? 0 : -ETIMEDOUT;
811 }
812 
813 static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
814 {
815 	struct usdhi6_host *host = mmc_priv(mmc);
816 	u32 option, mode;
817 	int ret;
818 
819 	dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
820 		ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);
821 
822 	switch (ios->power_mode) {
823 	case MMC_POWER_OFF:
824 		usdhi6_set_power(host, ios);
825 		usdhi6_only_cd(host);
826 		break;
827 	case MMC_POWER_UP:
828 		/*
		 * USDHI6_SD_OPTION is otherwise only touched from .request(),
		 * which cannot race with MMC_POWER_UP
831 		 */
832 		ret = usdhi6_reset(host);
833 		if (ret < 0) {
834 			dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
835 		} else {
836 			usdhi6_set_power(host, ios);
837 			usdhi6_only_cd(host);
838 		}
839 		break;
840 	case MMC_POWER_ON:
841 		option = usdhi6_read(host, USDHI6_SD_OPTION);
842 		/*
		 * The eMMC standard only allows 4 or 8 bits in DDR mode; the
		 * same probably holds for SD cards. We check here anyway,
845 		 * since the datasheet explicitly requires 4 bits for DDR.
846 		 */
847 		if (ios->bus_width == MMC_BUS_WIDTH_1) {
848 			if (ios->timing == MMC_TIMING_UHS_DDR50)
849 				dev_err(mmc_dev(mmc),
850 					"4 bits are required for DDR\n");
851 			option |= USDHI6_SD_OPTION_WIDTH_1;
852 			mode = 0;
853 		} else {
854 			option &= ~USDHI6_SD_OPTION_WIDTH_1;
855 			mode = ios->timing == MMC_TIMING_UHS_DDR50;
856 		}
857 		usdhi6_write(host, USDHI6_SD_OPTION, option);
858 		usdhi6_write(host, USDHI6_SDIF_MODE, mode);
859 		break;
860 	}
861 
862 	if (host->rate != ios->clock)
863 		usdhi6_clk_set(host, ios);
864 }
865 
/* This sets the data timeout. The response timeout is fixed at 640 clock cycles */
867 static void usdhi6_timeout_set(struct usdhi6_host *host)
868 {
869 	struct mmc_request *mrq = host->mrq;
870 	u32 val;
871 	unsigned long ticks;
872 
873 	if (!mrq->data)
874 		ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
875 	else
876 		ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
877 			mrq->data->timeout_clks;
878 
879 	if (!ticks || ticks > 1 << 27)
880 		/* Max timeout */
881 		val = 14;
882 	else if (ticks < 1 << 13)
883 		/* Min timeout */
884 		val = 0;
885 	else
886 		val = order_base_2(ticks) - 13;
887 
888 	dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
889 		mrq->data ? "data" : "cmd", ticks, host->rate);
890 
891 	/* Timeout Counter mask: 0xf0 */
892 	usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
893 		     (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
894 }
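
/*
 * Editor's note, a worked example with hypothetical numbers: at
 * host->rate = 25 MHz with a 100 ms data timeout (timeout_ns =
 * 100000000, timeout_clks = 0), ticks = 25 * 100000 = 2500000.
 * order_base_2(2500000) = 22, so val = 9 and the hardware timeout is
 * 2^(13 + 9) = 2^22 clock ticks, roughly 168 ms at 25 MHz: the smallest
 * supported power-of-two timeout that still covers the request.
 */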
895 
896 static void usdhi6_request_done(struct usdhi6_host *host)
897 {
898 	struct mmc_request *mrq = host->mrq;
899 	struct mmc_data *data = mrq->data;
900 
901 	if (WARN(host->pg.page || host->head_pg.page,
902 		 "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
903 		 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
904 		 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
905 		 data ? host->offset : 0, data ? data->blocks : 0,
906 		 data ? data->blksz : 0, data ? data->sg_len : 0))
907 		usdhi6_sg_unmap(host, true);
908 
909 	if (mrq->cmd->error ||
910 	    (data && data->error) ||
911 	    (mrq->stop && mrq->stop->error))
912 		dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
913 			__func__, mrq->cmd->opcode, data ? data->blocks : 0,
914 			data ? data->blksz : 0,
915 			mrq->cmd->error,
916 			data ? data->error : 1,
917 			mrq->stop ? mrq->stop->error : 1);
918 
919 	/* Disable DMA */
920 	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
921 	host->wait = USDHI6_WAIT_FOR_REQUEST;
922 	host->mrq = NULL;
923 
924 	mmc_request_done(host->mmc, mrq);
925 }
926 
927 static int usdhi6_cmd_flags(struct usdhi6_host *host)
928 {
929 	struct mmc_request *mrq = host->mrq;
930 	struct mmc_command *cmd = mrq->cmd;
931 	u16 opc = cmd->opcode;
932 
933 	if (host->app_cmd) {
934 		host->app_cmd = false;
935 		opc |= USDHI6_SD_CMD_APP;
936 	}
937 
938 	if (mrq->data) {
939 		opc |= USDHI6_SD_CMD_DATA;
940 
941 		if (mrq->data->flags & MMC_DATA_READ)
942 			opc |= USDHI6_SD_CMD_READ;
943 
944 		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
945 		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
946 		    (cmd->opcode == SD_IO_RW_EXTENDED &&
947 		     mrq->data->blocks > 1)) {
948 			opc |= USDHI6_SD_CMD_MULTI;
949 			if (!mrq->stop)
950 				opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
951 		}
952 
953 		switch (mmc_resp_type(cmd)) {
954 		case MMC_RSP_NONE:
955 			opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
956 			break;
957 		case MMC_RSP_R1:
958 			opc |= USDHI6_SD_CMD_MODE_RSP_R1;
959 			break;
960 		case MMC_RSP_R1B:
961 			opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
962 			break;
963 		case MMC_RSP_R2:
964 			opc |= USDHI6_SD_CMD_MODE_RSP_R2;
965 			break;
966 		case MMC_RSP_R3:
967 			opc |= USDHI6_SD_CMD_MODE_RSP_R3;
968 			break;
969 		default:
970 			dev_warn(mmc_dev(host->mmc),
971 				 "Unknown response type %d\n",
972 				 mmc_resp_type(cmd));
973 			return -EINVAL;
974 		}
975 	}
976 
977 	return opc;
978 }
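
/*
 * Editor's note, an example of the composed command word: CMD18
 * (MMC_READ_MULTIPLE_BLOCK, R1 response) with an explicit stop command
 * yields opc = 18 | USDHI6_SD_CMD_DATA | USDHI6_SD_CMD_READ |
 * USDHI6_SD_CMD_MULTI | USDHI6_SD_CMD_MODE_RSP_R1 = 0x3c12. Without a
 * stop command USDHI6_SD_CMD_CMD12_AUTO_OFF is set as well, so that the
 * controller does not issue CMD12 by itself.
 */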
979 
980 static int usdhi6_rq_start(struct usdhi6_host *host)
981 {
982 	struct mmc_request *mrq = host->mrq;
983 	struct mmc_command *cmd = mrq->cmd;
984 	struct mmc_data *data = mrq->data;
985 	int opc = usdhi6_cmd_flags(host);
986 	int i;
987 
988 	if (opc < 0)
989 		return opc;
990 
991 	for (i = 1000; i; i--) {
992 		if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
993 			break;
994 		usleep_range(10, 100);
995 	}
996 
997 	if (!i) {
998 		dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
999 		return -EAGAIN;
1000 	}
1001 
1002 	if (data) {
1003 		bool use_dma;
1004 		int ret = 0;
1005 
1006 		host->page_idx = 0;
1007 
1008 		if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
1009 			switch (data->blksz) {
1010 			case 512:
1011 				break;
1012 			case 32:
1013 			case 64:
1014 			case 128:
1015 			case 256:
1016 				if (mrq->stop)
1017 					ret = -EINVAL;
1018 				break;
1019 			default:
1020 				ret = -EINVAL;
1021 			}
1022 		} else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
1023 			    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
1024 			   data->blksz != 512) {
1025 			ret = -EINVAL;
1026 		}
1027 
1028 		if (ret < 0) {
1029 			dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
1030 				 __func__, data->blocks, data->blksz);
1031 			return -EINVAL;
1032 		}
1033 
1034 		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
1035 		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1036 		    (cmd->opcode == SD_IO_RW_EXTENDED &&
1037 		     data->blocks > 1))
1038 			usdhi6_sg_prep(host);
1039 
1040 		usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);
1041 
1042 		if ((data->blksz >= USDHI6_MIN_DMA ||
1043 		     data->blocks > 1) &&
1044 		    (data->blksz % 4 ||
1045 		     data->sg->offset % 4))
1046 			dev_dbg(mmc_dev(host->mmc),
1047 				"Bad SG of %u: %ux%u @ %u\n", data->sg_len,
1048 				data->blksz, data->blocks, data->sg->offset);
1049 
1050 		/* Enable DMA for USDHI6_MIN_DMA bytes or more */
1051 		use_dma = data->blksz >= USDHI6_MIN_DMA &&
1052 			!(data->blksz % 4) &&
1053 			usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
1054 
1055 		if (use_dma)
1056 			usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
1057 
1058 		dev_dbg(mmc_dev(host->mmc),
1059 			"%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
1060 			__func__, cmd->opcode, data->blocks, data->blksz,
1061 			data->sg_len, use_dma ? "DMA" : "PIO",
1062 			data->flags & MMC_DATA_READ ? "read" : "write",
1063 			data->sg->offset, mrq->stop ? " + stop" : "");
1064 	} else {
1065 		dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
1066 			__func__, cmd->opcode);
1067 	}
1068 
1069 	/* We have to get a command completion interrupt with DMA too */
1070 	usdhi6_wait_for_resp(host);
1071 
1072 	host->wait = USDHI6_WAIT_FOR_CMD;
1073 	schedule_delayed_work(&host->timeout_work, host->timeout);
1074 
1075 	/* SEC bit is required to enable block counting by the core */
1076 	usdhi6_write(host, USDHI6_SD_STOP,
1077 		     data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
1078 	usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
1079 
1080 	/* Kick command execution */
1081 	usdhi6_write(host, USDHI6_SD_CMD, opc);
1082 
1083 	return 0;
1084 }
1085 
1086 static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
1087 {
1088 	struct usdhi6_host *host = mmc_priv(mmc);
1089 	int ret;
1090 
1091 	cancel_delayed_work_sync(&host->timeout_work);
1092 
1093 	host->mrq = mrq;
1094 	host->sg = NULL;
1095 
1096 	usdhi6_timeout_set(host);
1097 	ret = usdhi6_rq_start(host);
1098 	if (ret < 0) {
1099 		mrq->cmd->error = ret;
1100 		usdhi6_request_done(host);
1101 	}
1102 }
1103 
1104 static int usdhi6_get_cd(struct mmc_host *mmc)
1105 {
1106 	struct usdhi6_host *host = mmc_priv(mmc);
1107 	/* Read is atomic, no need to lock */
1108 	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
1109 
1110 /*
1111  *	level	status.CD	CD_ACTIVE_HIGH	card present
1112  *	1	0		0		0
1113  *	1	0		1		1
1114  *	0	1		0		1
1115  *	0	1		1		0
1116  */
1117 	return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
1118 }
1119 
1120 static int usdhi6_get_ro(struct mmc_host *mmc)
1121 {
1122 	struct usdhi6_host *host = mmc_priv(mmc);
1123 	/* No locking as above */
1124 	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
1125 
1126 /*
1127  *	level	status.WP	RO_ACTIVE_HIGH	card read-only
1128  *	1	0		0		0
1129  *	1	0		1		1
1130  *	0	1		0		1
1131  *	0	1		1		0
1132  */
1133 	return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
1134 }
1135 
1136 static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
1137 {
1138 	struct usdhi6_host *host = mmc_priv(mmc);
1139 
1140 	dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");
1141 
1142 	if (enable) {
1143 		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
1144 		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
1145 		usdhi6_write(host, USDHI6_SDIO_MODE, 1);
1146 	} else {
1147 		usdhi6_write(host, USDHI6_SDIO_MODE, 0);
1148 		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
1149 		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
1150 	}
1151 }
1152 
1153 static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
1154 {
1155 	if (IS_ERR(host->pins_uhs))
1156 		return 0;
1157 
1158 	switch (voltage) {
1159 	case MMC_SIGNAL_VOLTAGE_180:
1160 	case MMC_SIGNAL_VOLTAGE_120:
1161 		return pinctrl_select_state(host->pinctrl,
1162 					    host->pins_uhs);
1163 
1164 	default:
1165 		return pinctrl_select_state(host->pinctrl,
1166 					    host->pins_default);
1167 	}
1168 }
1169 
1170 static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
1171 {
1172 	int ret;
1173 
1174 	ret = mmc_regulator_set_vqmmc(mmc, ios);
1175 	if (ret < 0)
1176 		return ret;
1177 
1178 	ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
1179 	if (ret)
1180 		dev_warn_once(mmc_dev(mmc),
1181 			      "Failed to set pinstate err=%d\n", ret);
1182 	return ret;
1183 }
1184 
1185 static const struct mmc_host_ops usdhi6_ops = {
1186 	.request	= usdhi6_request,
1187 	.set_ios	= usdhi6_set_ios,
1188 	.get_cd		= usdhi6_get_cd,
1189 	.get_ro		= usdhi6_get_ro,
1190 	.enable_sdio_irq = usdhi6_enable_sdio_irq,
1191 	.start_signal_voltage_switch = usdhi6_sig_volt_switch,
1192 };
1193 
1194 /*			State machine handlers				*/
1195 
1196 static void usdhi6_resp_cmd12(struct usdhi6_host *host)
1197 {
1198 	struct mmc_command *cmd = host->mrq->stop;
1199 	cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1200 }
1201 
1202 static void usdhi6_resp_read(struct usdhi6_host *host)
1203 {
1204 	struct mmc_command *cmd = host->mrq->cmd;
1205 	u32 *rsp = cmd->resp, tmp = 0;
1206 	int i;
1207 
1208 /*
1209  * RSP10	39-8
1210  * RSP32	71-40
1211  * RSP54	103-72
1212  * RSP76	127-104
1213  * R2-type response:
1214  * resp[0]	= r[127..96]
1215  * resp[1]	= r[95..64]
1216  * resp[2]	= r[63..32]
1217  * resp[3]	= r[31..0]
1218  * Other responses:
1219  * resp[0]	= r[39..8]
1220  */
1221 
1222 	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
1223 		return;
1224 
1225 	if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
1226 		dev_err(mmc_dev(host->mmc),
1227 			"CMD%d: response expected but is missing!\n", cmd->opcode);
1228 		return;
1229 	}
1230 
1231 	if (mmc_resp_type(cmd) & MMC_RSP_136)
1232 		for (i = 0; i < 4; i++) {
1233 			if (i)
1234 				rsp[3 - i] = tmp >> 24;
1235 			tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
1236 			rsp[3 - i] |= tmp << 8;
1237 		}
1238 	else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
1239 		 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
1240 		/* Read RSP54 to avoid conflict with auto CMD12 */
1241 		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
1242 	else
1243 		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1244 
1245 	dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
1246 }
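
/*
 * Editor's note on the 136-bit (R2) case above: the response registers
 * hold r[127..8] (the hardware strips the trailing CRC7 byte), so each
 * resp word is rebuilt from two adjacent registers: resp[3] = RSP10 << 8,
 * resp[2] = (RSP32 << 8) | (RSP10 >> 24), and so on up to resp[0], which
 * also takes the top byte of RSP54.
 */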
1247 
1248 static int usdhi6_blk_read(struct usdhi6_host *host)
1249 {
1250 	struct mmc_data *data = host->mrq->data;
1251 	u32 *p;
1252 	int i, rest;
1253 
1254 	if (host->io_error) {
1255 		data->error = usdhi6_error_code(host);
1256 		goto error;
1257 	}
1258 
1259 	if (host->pg.page) {
1260 		p = host->blk_page + host->offset;
1261 	} else {
1262 		p = usdhi6_sg_map(host);
1263 		if (!p) {
1264 			data->error = -ENOMEM;
1265 			goto error;
1266 		}
1267 	}
1268 
1269 	for (i = 0; i < data->blksz / 4; i++, p++)
1270 		*p = usdhi6_read(host, USDHI6_SD_BUF0);
1271 
1272 	rest = data->blksz % 4;
1273 	for (i = 0; i < (rest + 1) / 2; i++) {
1274 		u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
1275 		((u8 *)p)[2 * i] = ((u8 *)&d)[0];
1276 		if (rest > 1 && !i)
1277 			((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
1278 	}
1279 
1280 	return 0;
1281 
1282 error:
1283 	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1284 	host->wait = USDHI6_WAIT_FOR_REQUEST;
1285 	return data->error;
1286 }
1287 
1288 static int usdhi6_blk_write(struct usdhi6_host *host)
1289 {
1290 	struct mmc_data *data = host->mrq->data;
1291 	u32 *p;
1292 	int i, rest;
1293 
1294 	if (host->io_error) {
1295 		data->error = usdhi6_error_code(host);
1296 		goto error;
1297 	}
1298 
1299 	if (host->pg.page) {
1300 		p = host->blk_page + host->offset;
1301 	} else {
1302 		p = usdhi6_sg_map(host);
1303 		if (!p) {
1304 			data->error = -ENOMEM;
1305 			goto error;
1306 		}
1307 	}
1308 
1309 	for (i = 0; i < data->blksz / 4; i++, p++)
1310 		usdhi6_write(host, USDHI6_SD_BUF0, *p);
1311 
1312 	rest = data->blksz % 4;
1313 	for (i = 0; i < (rest + 1) / 2; i++) {
1314 		u16 d;
1315 		((u8 *)&d)[0] = ((u8 *)p)[2 * i];
1316 		if (rest > 1 && !i)
1317 			((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
1318 		else
1319 			((u8 *)&d)[1] = 0;
1320 		usdhi6_write16(host, USDHI6_SD_BUF0, d);
1321 	}
1322 
1323 	return 0;
1324 
1325 error:
1326 	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1327 	host->wait = USDHI6_WAIT_FOR_REQUEST;
1328 	return data->error;
1329 }
1330 
1331 static int usdhi6_stop_cmd(struct usdhi6_host *host)
1332 {
1333 	struct mmc_request *mrq = host->mrq;
1334 
1335 	switch (mrq->cmd->opcode) {
1336 	case MMC_READ_MULTIPLE_BLOCK:
1337 	case MMC_WRITE_MULTIPLE_BLOCK:
1338 		if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
1339 			host->wait = USDHI6_WAIT_FOR_STOP;
1340 			return 0;
1341 		}
		fallthrough;	/* Unsupported STOP command */
1343 	default:
1344 		dev_err(mmc_dev(host->mmc),
1345 			"unsupported stop CMD%d for CMD%d\n",
1346 			mrq->stop->opcode, mrq->cmd->opcode);
1347 		mrq->stop->error = -EOPNOTSUPP;
1348 	}
1349 
1350 	return -EOPNOTSUPP;
1351 }
1352 
1353 static bool usdhi6_end_cmd(struct usdhi6_host *host)
1354 {
1355 	struct mmc_request *mrq = host->mrq;
1356 	struct mmc_command *cmd = mrq->cmd;
1357 
1358 	if (host->io_error) {
1359 		cmd->error = usdhi6_error_code(host);
1360 		return false;
1361 	}
1362 
1363 	usdhi6_resp_read(host);
1364 
1365 	if (!mrq->data)
1366 		return false;
1367 
1368 	if (host->dma_active) {
1369 		usdhi6_dma_kick(host);
1370 		if (!mrq->stop)
1371 			host->wait = USDHI6_WAIT_FOR_DMA;
1372 		else if (usdhi6_stop_cmd(host) < 0)
1373 			return false;
1374 	} else if (mrq->data->flags & MMC_DATA_READ) {
1375 		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
1376 		    (cmd->opcode == SD_IO_RW_EXTENDED &&
1377 		     mrq->data->blocks > 1))
1378 			host->wait = USDHI6_WAIT_FOR_MREAD;
1379 		else
1380 			host->wait = USDHI6_WAIT_FOR_READ;
1381 	} else {
1382 		if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1383 		    (cmd->opcode == SD_IO_RW_EXTENDED &&
1384 		     mrq->data->blocks > 1))
1385 			host->wait = USDHI6_WAIT_FOR_MWRITE;
1386 		else
1387 			host->wait = USDHI6_WAIT_FOR_WRITE;
1388 	}
1389 
1390 	return true;
1391 }
1392 
1393 static bool usdhi6_read_block(struct usdhi6_host *host)
1394 {
1395 	/* ACCESS_END IRQ is already unmasked */
1396 	int ret = usdhi6_blk_read(host);
1397 
1398 	/*
1399 	 * Have to force unmapping both pages: the single block could have been
1400 	 * cross-page, in which case for single-block IO host->page_idx == 0.
1401 	 * So, if we don't force, the second page won't be unmapped.
1402 	 */
1403 	usdhi6_sg_unmap(host, true);
1404 
1405 	if (ret < 0)
1406 		return false;
1407 
1408 	host->wait = USDHI6_WAIT_FOR_DATA_END;
1409 	return true;
1410 }
1411 
1412 static bool usdhi6_mread_block(struct usdhi6_host *host)
1413 {
1414 	int ret = usdhi6_blk_read(host);
1415 
1416 	if (ret < 0)
1417 		return false;
1418 
1419 	usdhi6_sg_advance(host);
1420 
1421 	return !host->mrq->data->error &&
1422 		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1423 }
1424 
1425 static bool usdhi6_write_block(struct usdhi6_host *host)
1426 {
1427 	int ret = usdhi6_blk_write(host);
1428 
1429 	/* See comment in usdhi6_read_block() */
1430 	usdhi6_sg_unmap(host, true);
1431 
1432 	if (ret < 0)
1433 		return false;
1434 
1435 	host->wait = USDHI6_WAIT_FOR_DATA_END;
1436 	return true;
1437 }
1438 
1439 static bool usdhi6_mwrite_block(struct usdhi6_host *host)
1440 {
1441 	int ret = usdhi6_blk_write(host);
1442 
1443 	if (ret < 0)
1444 		return false;
1445 
1446 	usdhi6_sg_advance(host);
1447 
1448 	return !host->mrq->data->error &&
1449 		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1450 }
1451 
1452 /*			Interrupt & timeout handlers			*/
1453 
1454 static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
1455 {
1456 	struct usdhi6_host *host = dev_id;
1457 	struct mmc_request *mrq;
1458 	struct mmc_command *cmd;
1459 	struct mmc_data *data;
1460 	bool io_wait = false;
1461 
1462 	cancel_delayed_work_sync(&host->timeout_work);
1463 
1464 	mrq = host->mrq;
1465 	if (!mrq)
1466 		return IRQ_HANDLED;
1467 
1468 	cmd = mrq->cmd;
1469 	data = mrq->data;
1470 
1471 	switch (host->wait) {
1472 	case USDHI6_WAIT_FOR_REQUEST:
1473 		/* We're too late, the timeout has already kicked in */
1474 		return IRQ_HANDLED;
1475 	case USDHI6_WAIT_FOR_CMD:
1476 		/* Wait for data? */
1477 		io_wait = usdhi6_end_cmd(host);
1478 		break;
1479 	case USDHI6_WAIT_FOR_MREAD:
1480 		/* Wait for more data? */
1481 		io_wait = usdhi6_mread_block(host);
1482 		break;
1483 	case USDHI6_WAIT_FOR_READ:
1484 		/* Wait for data end? */
1485 		io_wait = usdhi6_read_block(host);
1486 		break;
1487 	case USDHI6_WAIT_FOR_MWRITE:
		/* Wait for more data to write? */
1489 		io_wait = usdhi6_mwrite_block(host);
1490 		break;
1491 	case USDHI6_WAIT_FOR_WRITE:
1492 		/* Wait for data end? */
1493 		io_wait = usdhi6_write_block(host);
1494 		break;
1495 	case USDHI6_WAIT_FOR_DMA:
1496 		usdhi6_dma_check_error(host);
1497 		break;
1498 	case USDHI6_WAIT_FOR_STOP:
1499 		usdhi6_write(host, USDHI6_SD_STOP, 0);
1500 		if (host->io_error) {
1501 			int ret = usdhi6_error_code(host);
1502 			if (mrq->stop)
1503 				mrq->stop->error = ret;
1504 			else
1505 				mrq->data->error = ret;
1506 			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
1507 			break;
1508 		}
1509 		usdhi6_resp_cmd12(host);
1510 		mrq->stop->error = 0;
1511 		break;
1512 	case USDHI6_WAIT_FOR_DATA_END:
1513 		if (host->io_error) {
1514 			mrq->data->error = usdhi6_error_code(host);
1515 			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
1516 				 mrq->data->error);
1517 		}
1518 		break;
1519 	default:
1520 		cmd->error = -EFAULT;
1521 		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
1522 		usdhi6_request_done(host);
1523 		return IRQ_HANDLED;
1524 	}
1525 
1526 	if (io_wait) {
1527 		schedule_delayed_work(&host->timeout_work, host->timeout);
1528 		/* Wait for more data or ACCESS_END */
1529 		if (!host->dma_active)
1530 			usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
1531 		return IRQ_HANDLED;
1532 	}
1533 
1534 	if (!cmd->error) {
1535 		if (data) {
1536 			if (!data->error) {
1537 				if (host->wait != USDHI6_WAIT_FOR_STOP &&
1538 				    host->mrq->stop &&
1539 				    !host->mrq->stop->error &&
1540 				    !usdhi6_stop_cmd(host)) {
1541 					/* Sending STOP */
1542 					usdhi6_wait_for_resp(host);
1543 
1544 					schedule_delayed_work(&host->timeout_work,
1545 							      host->timeout);
1546 
1547 					return IRQ_HANDLED;
1548 				}
1549 
1550 				data->bytes_xfered = data->blocks * data->blksz;
1551 			} else {
1552 				/* Data error: might need to unmap the last page */
1553 				dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
1554 					 __func__, data->error);
1555 				usdhi6_sg_unmap(host, true);
1556 			}
1557 		} else if (cmd->opcode == MMC_APP_CMD) {
1558 			host->app_cmd = true;
1559 		}
1560 	}
1561 
1562 	usdhi6_request_done(host);
1563 
1564 	return IRQ_HANDLED;
1565 }
1566 
1567 static irqreturn_t usdhi6_sd(int irq, void *dev_id)
1568 {
1569 	struct usdhi6_host *host = dev_id;
1570 	u16 status, status2, error;
1571 
1572 	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1573 		~USDHI6_SD_INFO1_CARD;
1574 	status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;
1575 
1576 	usdhi6_only_cd(host);
1577 
1578 	dev_dbg(mmc_dev(host->mmc),
1579 		"IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);
1580 
1581 	if (!status && !status2)
1582 		return IRQ_NONE;
1583 
1584 	error = status2 & USDHI6_SD_INFO2_ERR;
1585 
1586 	/* Ack / clear interrupts */
1587 	if (USDHI6_SD_INFO1_IRQ & status)
1588 		usdhi6_write(host, USDHI6_SD_INFO1,
1589 			     0xffff & ~(USDHI6_SD_INFO1_IRQ & status));
1590 
1591 	if (USDHI6_SD_INFO2_IRQ & status2) {
1592 		if (error)
1593 			/* In error cases BWE and BRE aren't cleared automatically */
1594 			status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;
1595 
1596 		usdhi6_write(host, USDHI6_SD_INFO2,
1597 			     0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
1598 	}
1599 
1600 	host->io_error = error;
1601 	host->irq_status = status;
1602 
1603 	if (error) {
1604 		/* Don't pollute the log with unsupported command timeouts */
1605 		if (host->wait != USDHI6_WAIT_FOR_CMD ||
1606 		    error != USDHI6_SD_INFO2_RSP_TOUT)
1607 			dev_warn(mmc_dev(host->mmc),
1608 				 "%s(): INFO2 error bits 0x%08x\n",
1609 				 __func__, error);
1610 		else
1611 			dev_dbg(mmc_dev(host->mmc),
1612 				"%s(): INFO2 error bits 0x%08x\n",
1613 				__func__, error);
1614 	}
1615 
1616 	return IRQ_WAKE_THREAD;
1617 }
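
/*
 * Editor's note: usdhi6_sd() above runs in hard-IRQ context and only
 * acknowledges and classifies the event before returning IRQ_WAKE_THREAD;
 * the actual state machine runs in the threaded handler usdhi6_sd_bh(),
 * as wired up by devm_request_threaded_irq() in usdhi6_probe().
 */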
1618 
1619 static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
1620 {
1621 	struct usdhi6_host *host = dev_id;
1622 	u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;
1623 
1624 	dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);
1625 
1626 	if (!status)
1627 		return IRQ_NONE;
1628 
1629 	usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);
1630 
1631 	mmc_signal_sdio_irq(host->mmc);
1632 
1633 	return IRQ_HANDLED;
1634 }
1635 
1636 static irqreturn_t usdhi6_cd(int irq, void *dev_id)
1637 {
1638 	struct usdhi6_host *host = dev_id;
1639 	struct mmc_host *mmc = host->mmc;
1640 	u16 status;
1641 
1642 	/* We're only interested in hotplug events here */
1643 	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1644 		USDHI6_SD_INFO1_CARD;
1645 
1646 	if (!status)
1647 		return IRQ_NONE;
1648 
1649 	/* Ack */
1650 	usdhi6_write(host, USDHI6_SD_INFO1, ~status);
1651 
1652 	if (!work_pending(&mmc->detect.work) &&
1653 	    (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
1654 	      !mmc->card) ||
1655 	     ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
1656 	      mmc->card)))
1657 		mmc_detect_change(mmc, msecs_to_jiffies(100));
1658 
1659 	return IRQ_HANDLED;
1660 }
1661 
1662 /*
 * Actually this should not be needed if the built-in timeout works reliably in
 * both PIO cases and DMA never fails. But if DMA does fail, a timeout
1665  * handler might be the only way to catch the error.
1666  */
1667 static void usdhi6_timeout_work(struct work_struct *work)
1668 {
1669 	struct delayed_work *d = to_delayed_work(work);
1670 	struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
1671 	struct mmc_request *mrq = host->mrq;
1672 	struct mmc_data *data = mrq ? mrq->data : NULL;
1673 	struct scatterlist *sg;
1674 
1675 	dev_warn(mmc_dev(host->mmc),
1676 		 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
1677 		 host->dma_active ? "DMA" : "PIO",
1678 		 host->wait, mrq ? mrq->cmd->opcode : -1,
1679 		 usdhi6_read(host, USDHI6_SD_INFO1),
1680 		 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);
1681 
1682 	if (host->dma_active) {
1683 		usdhi6_dma_kill(host);
1684 		usdhi6_dma_stop_unmap(host);
1685 	}
1686 
1687 	switch (host->wait) {
1688 	default:
1689 		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
		fallthrough;	/* mrq can be NULL in this actually impossible case */
1691 	case USDHI6_WAIT_FOR_CMD:
1692 		usdhi6_error_code(host);
1693 		if (mrq)
1694 			mrq->cmd->error = -ETIMEDOUT;
1695 		break;
1696 	case USDHI6_WAIT_FOR_STOP:
1697 		usdhi6_error_code(host);
1698 		mrq->stop->error = -ETIMEDOUT;
1699 		break;
1700 	case USDHI6_WAIT_FOR_DMA:
1701 	case USDHI6_WAIT_FOR_MREAD:
1702 	case USDHI6_WAIT_FOR_MWRITE:
1703 	case USDHI6_WAIT_FOR_READ:
1704 	case USDHI6_WAIT_FOR_WRITE:
1705 		sg = host->sg ?: data->sg;
1706 		dev_dbg(mmc_dev(host->mmc),
1707 			"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
1708 			data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
1709 			host->offset, data->blocks, data->blksz, data->sg_len,
1710 			sg_dma_len(sg), sg->offset);
1711 		usdhi6_sg_unmap(host, true);
		/*
		 * If USDHI6_WAIT_FOR_DATA_END times out, we have already
		 * unmapped the page
		 */
		fallthrough;
1716 	case USDHI6_WAIT_FOR_DATA_END:
1717 		usdhi6_error_code(host);
1718 		data->error = -ETIMEDOUT;
1719 	}
1720 
1721 	if (mrq)
1722 		usdhi6_request_done(host);
1723 }
1724 
1725 /*			 Probe / release				*/
1726 
1727 static const struct of_device_id usdhi6_of_match[] = {
1728 	{.compatible = "renesas,usdhi6rol0"},
1729 	{}
1730 };
1731 MODULE_DEVICE_TABLE(of, usdhi6_of_match);
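
/*
 * Editor's note, a hypothetical device-tree node matching the resources
 * requested in usdhi6_probe() below (the interrupt and DMA channel names
 * come from the platform_get_irq_byname() and dma_request_slave_channel()
 * calls; the addresses and specifiers are made up):
 *
 *	sdhi: sd@ab000000 {
 *		compatible = "renesas,usdhi6rol0";
 *		reg = <0xab000000 0x200>;
 *		interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
 *		interrupt-names = "card detect", "data", "SDIO";
 *		clocks = <&mclk>;
 *		dmas = <&dma0 0>, <&dma0 1>;
 *		dma-names = "tx", "rx";
 *	};
 */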
1732 
1733 static int usdhi6_probe(struct platform_device *pdev)
1734 {
1735 	struct device *dev = &pdev->dev;
1736 	struct mmc_host *mmc;
1737 	struct usdhi6_host *host;
1738 	struct resource *res;
1739 	int irq_cd, irq_sd, irq_sdio;
1740 	u32 version;
1741 	int ret;
1742 
1743 	if (!dev->of_node)
1744 		return -ENODEV;
1745 
1746 	irq_cd = platform_get_irq_byname(pdev, "card detect");
1747 	irq_sd = platform_get_irq_byname(pdev, "data");
1748 	irq_sdio = platform_get_irq_byname(pdev, "SDIO");
1749 	if (irq_sd < 0 || irq_sdio < 0)
1750 		return -ENODEV;
1751 
1752 	mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
1753 	if (!mmc)
1754 		return -ENOMEM;
1755 
1756 	ret = mmc_regulator_get_supply(mmc);
1757 	if (ret)
1758 		goto e_free_mmc;
1759 
1760 	ret = mmc_of_parse(mmc);
1761 	if (ret < 0)
1762 		goto e_free_mmc;
1763 
1764 	host		= mmc_priv(mmc);
1765 	host->mmc	= mmc;
1766 	host->wait	= USDHI6_WAIT_FOR_REQUEST;
1767 	host->timeout	= msecs_to_jiffies(4000);
1768 
1769 	host->pinctrl = devm_pinctrl_get(&pdev->dev);
1770 	if (IS_ERR(host->pinctrl)) {
1771 		ret = PTR_ERR(host->pinctrl);
1772 		goto e_free_mmc;
1773 	}
1774 
1775 	host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
1776 	if (!IS_ERR(host->pins_uhs)) {
1777 		host->pins_default = pinctrl_lookup_state(host->pinctrl,
1778 							  PINCTRL_STATE_DEFAULT);
1779 
1780 		if (IS_ERR(host->pins_default)) {
1781 			dev_err(dev,
1782 				"UHS pinctrl requires a default pin state.\n");
1783 			ret = PTR_ERR(host->pins_default);
1784 			goto e_free_mmc;
1785 		}
1786 	}
1787 
1788 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1789 	host->base = devm_ioremap_resource(dev, res);
1790 	if (IS_ERR(host->base)) {
1791 		ret = PTR_ERR(host->base);
1792 		goto e_free_mmc;
1793 	}
1794 
1795 	host->clk = devm_clk_get(dev, NULL);
1796 	if (IS_ERR(host->clk)) {
1797 		ret = PTR_ERR(host->clk);
1798 		goto e_free_mmc;
1799 	}
1800 
1801 	host->imclk = clk_get_rate(host->clk);
1802 
1803 	ret = clk_prepare_enable(host->clk);
1804 	if (ret < 0)
1805 		goto e_free_mmc;
1806 
1807 	version = usdhi6_read(host, USDHI6_VERSION);
1808 	if ((version & 0xfff) != 0xa0d) {
1809 		dev_err(dev, "Version not recognized %x\n", version);
1810 		goto e_clk_off;
1811 	}
1812 
1813 	dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
1814 		 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);
1815 
1816 	usdhi6_mask_all(host);
1817 
1818 	if (irq_cd >= 0) {
1819 		ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
1820 				       dev_name(dev), host);
1821 		if (ret < 0)
1822 			goto e_clk_off;
1823 	} else {
1824 		mmc->caps |= MMC_CAP_NEEDS_POLL;
1825 	}
1826 
1827 	ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
1828 			       dev_name(dev), host);
1829 	if (ret < 0)
1830 		goto e_clk_off;
1831 
1832 	ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
1833 			       dev_name(dev), host);
1834 	if (ret < 0)
1835 		goto e_clk_off;
1836 
1837 	INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);
1838 
1839 	usdhi6_dma_request(host, res->start);
1840 
1841 	mmc->ops = &usdhi6_ops;
1842 	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
1843 		     MMC_CAP_SDIO_IRQ;
	/* Set .max_segs to an arbitrary number. Feel free to adjust. */
1845 	mmc->max_segs = 32;
1846 	mmc->max_blk_size = 512;
1847 	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1848 	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1849 	/*
	 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
	 * but OTOH having large segments makes DMA more efficient. We could
	 * check whether we managed to get DMA and fall back to 1 page
	 * segments, but if we do manage to obtain DMA and then it fails at
1854 	 * run-time and we fall back to PIO, we will continue getting large
1855 	 * segments. So, we wouldn't be able to get rid of the code anyway.
1856 	 */
1857 	mmc->max_seg_size = mmc->max_req_size;
1858 	if (!mmc->f_max)
1859 		mmc->f_max = host->imclk;
1860 	mmc->f_min = host->imclk / 512;
1861 
1862 	platform_set_drvdata(pdev, host);
1863 
1864 	ret = mmc_add_host(mmc);
1865 	if (ret < 0)
1866 		goto e_clk_off;
1867 
1868 	return 0;
1869 
1870 e_clk_off:
1871 	clk_disable_unprepare(host->clk);
1872 e_free_mmc:
1873 	mmc_free_host(mmc);
1874 
1875 	return ret;
1876 }
1877 
1878 static int usdhi6_remove(struct platform_device *pdev)
1879 {
1880 	struct usdhi6_host *host = platform_get_drvdata(pdev);
1881 
1882 	mmc_remove_host(host->mmc);
1883 
1884 	usdhi6_mask_all(host);
1885 	cancel_delayed_work_sync(&host->timeout_work);
1886 	usdhi6_dma_release(host);
1887 	clk_disable_unprepare(host->clk);
1888 	mmc_free_host(host->mmc);
1889 
1890 	return 0;
1891 }
1892 
1893 static struct platform_driver usdhi6_driver = {
1894 	.probe		= usdhi6_probe,
1895 	.remove		= usdhi6_remove,
1896 	.driver		= {
1897 		.name	= "usdhi6rol0",
1898 		.of_match_table = usdhi6_of_match,
1899 	},
1900 };
1901 
1902 module_platform_driver(usdhi6_driver);
1903 
1904 MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
1905 MODULE_LICENSE("GPL v2");
1906 MODULE_ALIAS("platform:usdhi6rol0");
1907 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1908