xref: /openbmc/linux/drivers/mmc/host/wmt-sdmmc.c (revision 9470114d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  WM8505/WM8650 SD/MMC Host Controller
 *
 *  Copyright (C) 2010 Tony Prisk
 *  Copyright (C) 2008 WonderMedia Technologies, Inc.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/byteorder.h>


#define DRIVER_NAME "wmt-sdhc"


/* MMC/SD controller registers */
#define SDMMC_CTLR			0x00
#define SDMMC_CMD			0x01
#define SDMMC_RSPTYPE			0x02
#define SDMMC_ARG			0x04
#define SDMMC_BUSMODE			0x08
#define SDMMC_BLKLEN			0x0C
#define SDMMC_BLKCNT			0x0E
#define SDMMC_RSP			0x10
#define SDMMC_CBCR			0x20
#define SDMMC_INTMASK0			0x24
#define SDMMC_INTMASK1			0x25
#define SDMMC_STS0			0x28
#define SDMMC_STS1			0x29
#define SDMMC_STS2			0x2A
#define SDMMC_STS3			0x2B
#define SDMMC_RSPTIMEOUT		0x2C
#define SDMMC_CLK			0x30	/* VT8500 only */
#define SDMMC_EXTCTRL			0x34
#define SDMMC_SBLKLEN			0x38
#define SDMMC_DMATIMEOUT		0x3C


/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START			0x01
#define CTLR_CMD_WRITE			0x04
#define CTLR_FIFO_RESET			0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE			0x01
#define BM_FOURBIT_MODE			0x02
#define BM_EIGHTBIT_MODE		0x04
#define BM_SD_OFF			0x10
#define BM_SPI_CS			0x20
#define BM_SD_POWER			0x40
#define BM_SOFT_RESET			0x80

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT		0x0800
#define BLKL_CD_POL_HIGH		0x1000
#define BLKL_GPI_CD			0x2000
#define BLKL_DATA3_CD			0x4000
#define BLKL_INT_ENABLE			0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN	0x10
#define INT0_BLK_TRAN_DONE_INT_EN	0x20
#define INT0_CD_INT_EN			0x40
#define INT0_DI_INT_EN			0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02
#define INT1_CMD_RES_TOUT_INT_EN	0x04
#define INT1_MBLK_AUTO_STOP_INT_EN	0x08
#define INT1_DATA_TOUT_INT_EN		0x10
#define INT1_RESCRC_ERR_INT_EN		0x20
#define INT1_RCRC_ERR_INT_EN		0x40
#define INT1_WCRC_ERR_INT_EN		0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT		0x02
#define STS0_CD_DATA3			0x04
#define STS0_CD_GPI			0x08
#define STS0_MBLK_DONE			0x10
#define STS0_BLK_DONE			0x20
#define STS0_CARD_DETECT		0x40
#define STS0_DEVICE_INS			0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT			0x01
#define STS1_CMDRSP_DONE		0x02
#define STS1_RSP_TIMEOUT		0x04
#define STS1_AUTOSTOP_DONE		0x08
#define STS1_DATA_TIMEOUT		0x10
#define STS1_RSP_CRC_ERR		0x20
#define STS1_RCRC_ERR			0x40
#define STS1_WCRC_ERR			0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY		0x10
#define STS2_DATARSP_BUSY		0x20
#define STS2_DIS_FORCECLK		0x80

/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT			0x04

/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR			0x100
#define SDDMA_IER			0x104
#define SDDMA_ISR			0x108
#define SDDMA_DESPR			0x10C
#define SDDMA_RBR			0x110
#define SDDMA_DAR			0x114
#define SDDMA_BAR			0x118
#define SDDMA_CPR			0x11C
#define SDDMA_CCR			0x120


/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN			0x00000001
#define DMA_GCR_SOFT_RESET		0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN			0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS			0x00000001

/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT			0x40000000
#define DMA_RBR_END			0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN			0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000

/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS		0x00000000
#define DMA_CCR_EVT_UNDERRUN		0x00000001
#define DMA_CCR_EVT_OVERRUN		0x00000002
#define DMA_CCR_EVT_DESP_READ		0x00000003
#define DMA_CCR_EVT_DATA_RW		0x00000004
#define DMA_CCR_EVT_EARLY_END		0x00000005
#define DMA_CCR_EVT_SUCCESS		0x0000000F

#define PDMA_READ			0x00
#define PDMA_WRITE			0x01

#define WMT_SD_POWER_OFF		0
#define WMT_SD_POWER_ON			1

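/*
 * In-memory descriptor consumed by the SD/MMC PDMA engine: flags carries
 * the format/end bits and the byte count, data_buffer_addr the DMA address
 * of the data, and branch_addr the address of the next descriptor in the
 * chain (see wmt_dma_init_descriptor() below).
 */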
struct wmt_dma_descriptor {
	u32 flags;
	u32 data_buffer_addr;
	u32 branch_addr;
	u32 reserved1;
};

struct wmt_mci_caps {
	unsigned int	f_min;
	unsigned int	f_max;
	u32		ocr_avail;
	u32		caps;
	u32		max_seg_size;
	u32		max_segs;
	u32		max_blk_size;
};

struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;

	int irq_regular;
	int irq_dma;

	void *dma_desc_buffer;
	dma_addr_t dma_desc_device_addr;

	struct completion cmdcomp;
	struct completion datacomp;

	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;
	struct mmc_command *cmd;

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;
	u8 cd_inverted;
};

static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

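	/*
	 * BM_SD_OFF gates card power off when set; honour boards that wire
	 * the SD power enable the other way round (priv->power_inverted,
	 * taken from the "sdon-inverted" DT property in probe()).
	 */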
	if (enable ^ priv->power_inverted)
		reg_tmp &= ~BM_SD_OFF;
	else
		reg_tmp |= BM_SD_OFF;

	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}

static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

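	/*
	 * Assemble the four 32-bit response words a byte at a time from the
	 * SDMMC_RSP register window; the very last byte is read back from
	 * offset 0, where the response FIFO appears to wrap.
	 */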
	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1*4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}

static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}

static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}

static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}

static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;
	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for the data transfer */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* check whether the command or the DMA ISR flagged an error */
	if ((req->cmd->error) || (req->data->error))
		mmc_request_done(priv->mmc, req);
	else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
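			/*
			 * cmdtype 7 / rsptype 9 are the controller encodings
			 * used here for the stop command; rsptype 9 matches
			 * the R1B conversion done in wmt_mci_request().
			 */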
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}

static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;

	int status;

	priv = (struct wmt_mci_priv *)data;

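	/* the low nibble of SDDMA_CCR reports the DMA event status (DMA_CCR_EVT_*) */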
	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;

	if (status != DMA_CCR_EVT_SUCCESS) {
		dev_err(priv->dev, "DMA Error: Status = %d\n", status);
		priv->req->data->error = -ETIMEDOUT;
		complete(priv->comp_dma);
		return IRQ_HANDLED;
	}

	priv->req->data->error = 0;

	wmt_mci_disable_dma(priv);

	complete(priv->comp_dma);

	if (priv->comp_cmd) {
		if (completion_done(priv->comp_cmd)) {
			/*
			 * If the command (regular) interrupt has already
			 * completed, finish off the request; otherwise we wait
			 * for the command interrupt and finish from there.
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}

static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* enable automatic clock freezing */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400 kHz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}

static int wmt_dma_init(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;

	priv = mmc_priv(mmc);

	writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
	writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
	if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
		return 0;
	else
		return 1;
}

static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
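	/*
	 * 0x40000000 / 0x80000000 correspond to DMA_RBR_FORMAT / DMA_RBR_END;
	 * req_count is the transfer length in bytes for this descriptor.
	 */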
	desc->flags = 0x40000000 | req_count;
	if (end)
		desc->flags |= 0x80000000;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}

static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

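	/*
	 * Program the transfer direction: DMA_CCR_IF_TO_PERIPHERAL (0) for
	 * writes towards the card, DMA_CCR_PERIPHERAL_TO_IF for reads.
	 */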
	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}

static void wmt_dma_start(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
	writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}

static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure.
	 * This is required on multi-block requests to pass the pointer to
	 * the stop command.
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

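		/*
		 * Build one 16-byte descriptor per block; each descriptor's
		 * branch_addr points at the next one, so the first link
		 * target is 16 bytes past the start of the descriptor buffer.
		 */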
		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg)+offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
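		/*
		 * Back up to the last descriptor built and mark it as the
		 * end of the chain (DMA_RBR_END).
		 */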
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}

static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 busmode, extctrl;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);

	busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
	extctrl &= ~EXT_EIGHTBIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		busmode |= BM_EIGHTBIT_MODE;
		extctrl |= EXT_EIGHTBIT;
		break;
	case MMC_BUS_WIDTH_4:
		busmode |= BM_FOURBIT_MODE;
		break;
	case MMC_BUS_WIDTH_1:
		break;
	}

	writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}

static int wmt_mci_get_ro(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);

	return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}

static int wmt_mci_get_cd(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
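	/*
	 * STS0_CD_GPI is bit 3 of SDMMC_STS0; shift it down to 0/1 and
	 * honour the "cd-inverted" devicetree property.
	 */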
	u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

	return !(cd ^ priv->cd_inverted);
}

static const struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};

static const struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};

static int wmt_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	const struct wmt_mci_caps *wmt_caps;
	int ret;
	int regular_irq, dma_irq;

	wmt_caps = of_device_get_match_data(&pdev->dev);
	if (!wmt_caps) {
		dev_err(&pdev->dev, "Controller capabilities data missing\n");
		return -EFAULT;
	}

	if (!np) {
		dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
		return -EFAULT;
	}

	regular_irq = irq_of_parse_and_map(np, 0);
	dma_irq = irq_of_parse_and_map(np, 1);

	if (!regular_irq || !dma_irq) {
		dev_err(&pdev->dev, "Getting IRQs failed!\n");
		ret = -ENXIO;
		goto fail1;
	}

	mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
		ret = -ENOMEM;
		goto fail1;
	}

	mmc->ops = &wmt_mci_ops;
	mmc->f_min = wmt_caps->f_min;
	mmc->f_max = wmt_caps->f_max;
	mmc->ocr_avail = wmt_caps->ocr_avail;
	mmc->caps = wmt_caps->caps;

	mmc->max_seg_size = wmt_caps->max_seg_size;
	mmc->max_segs = wmt_caps->max_segs;
	mmc->max_blk_size = wmt_caps->max_blk_size;

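	/*
	 * The DMA code builds one 16-byte descriptor per block, so the
	 * request limits below are sized against 512-byte blocks and the
	 * descriptor buffer allocated later is max_blk_count * 16 bytes.
	 */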
	mmc->max_req_size = (16*512*mmc->max_segs);
	mmc->max_blk_count = mmc->max_req_size / 512;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	priv->dev = &pdev->dev;

	priv->power_inverted = 0;
	priv->cd_inverted = 0;

	priv->power_inverted = of_property_read_bool(np, "sdon-inverted");
	priv->cd_inverted = of_property_read_bool(np, "cd-inverted");

	priv->sdmmc_base = of_iomap(np, 0);
	if (!priv->sdmmc_base) {
		dev_err(&pdev->dev, "Failed to map IO space\n");
		ret = -ENOMEM;
		goto fail2;
	}

	priv->irq_regular = regular_irq;
	priv->irq_dma = dma_irq;

	ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register regular IRQ fail\n");
		goto fail3;
	}

	ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register DMA IRQ fail\n");
		goto fail4;
	}

	/* alloc some DMA buffers for descriptors/transfers */
	priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
						   mmc->max_blk_count * 16,
						   &priv->dma_desc_device_addr,
						   GFP_KERNEL);
	if (!priv->dma_desc_buffer) {
		dev_err(&pdev->dev, "DMA alloc fail\n");
		ret = -EPERM;
		goto fail5;
	}

	platform_set_drvdata(pdev, mmc);

	priv->clk_sdmmc = of_clk_get(np, 0);
	if (IS_ERR(priv->clk_sdmmc)) {
		dev_err(&pdev->dev, "Error getting clock\n");
		ret = PTR_ERR(priv->clk_sdmmc);
		goto fail5_and_a_half;
	}

	ret = clk_prepare_enable(priv->clk_sdmmc);
	if (ret)
		goto fail6;

	/* configure the controller to a known 'ready' state */
	wmt_reset_hardware(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto fail7;

	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");

	return 0;
fail7:
	clk_disable_unprepare(priv->clk_sdmmc);
fail6:
	clk_put(priv->clk_sdmmc);
fail5_and_a_half:
	dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
	free_irq(dma_irq, priv);
fail4:
	free_irq(regular_irq, priv);
fail3:
	iounmap(priv->sdmmc_base);
fail2:
	mmc_free_host(mmc);
fail1:
	return ret;
}

static void wmt_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	mmc = platform_get_drvdata(pdev);
	priv = mmc_priv(mmc);

	/* reset SD controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
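	/* clear BLKL_INT_ENABLE and BLKL_GPI_CD (0xA000) before tearing down */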
	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* release the dma buffers */
	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");
}

#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
	       SDMMC_BUSMODE);

	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
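	/* 0x5FFF == ~(BLKL_INT_ENABLE | BLKL_GPI_CD): mask card-detect interrupts while suspended */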
	writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	clk_disable(priv->clk_sdmmc);
	return 0;
}

static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);
	}

	return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
	.suspend        = wmt_mci_suspend,
	.resume         = wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else	/* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove_new = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);