xref: /openbmc/linux/drivers/mmc/host/pxamci.c (revision 64c70b1c)
1 /*
2  *  linux/drivers/mmc/pxa.c - PXA MMCI driver
3  *
4  *  Copyright (C) 2003 Russell King, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  This hardware is really sick:
11  *   - No way to clear interrupts.
12  *   - Have to turn off the clock whenever we touch the device.
13  *   - Doesn't tell you how many data blocks were transferred.
14  *  Yuck!
15  *
16  *	1 and 3 byte data transfers not supported
17  *	max block length up to 1023
18  */
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/platform_device.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/mmc/host.h>
27 
28 #include <asm/dma.h>
29 #include <asm/io.h>
30 #include <asm/scatterlist.h>
31 #include <asm/sizes.h>
32 
33 #include <asm/arch/pxa-regs.h>
34 #include <asm/arch/mmc.h>
35 
36 #include "pxamci.h"
37 
38 #define DRIVER_NAME	"pxa2xx-mci"
39 
40 #define NR_SG	1
41 
/* Per-controller driver state, allocated as mmc_host private data. */
struct pxamci_host {
	struct mmc_host		*mmc;		/* back-pointer to the MMC core host */
	spinlock_t		lock;		/* protects imask / MMC_I_MASK updates */
	struct resource		*res;		/* MMIO region claimed in probe */
	void __iomem		*base;		/* ioremapped controller registers */
	int			irq;		/* controller interrupt line */
	int			dma;		/* PXA DMA channel number (-1 if none) */
	unsigned int		clkrt;		/* cached MMC_CLKRT divisor value */
	unsigned int		cmdat;		/* sticky CMDAT bits (e.g. CMDAT_INIT) */
	unsigned int		imask;		/* cached MMC_I_MASK (set bit = masked) */
	unsigned int		power_mode;	/* last ios->power_mode seen */
	struct pxamci_platform_data *pdata;	/* board hooks: init/exit/get_ro/setpower */

	/* Request currently in flight (at most one at a time). */
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;		/* bus address of the descriptor page */
	struct pxa_dma_desc	*sg_cpu;	/* CPU view of the DMA descriptor chain */
	unsigned int		dma_len;	/* entries returned by dma_map_sg() */

	unsigned int		dma_dir;	/* DMA_FROM_DEVICE or DMA_TO_DEVICE */
};
65 
/*
 * Stop the MMC bus clock, busy-waiting (up to ~10ms in 1us steps) for
 * the controller to acknowledge by clearing STAT_CLK_EN.  The hardware
 * requires the clock to be stopped before registers are reprogrammed.
 */
static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		/* Poll until the clock-enable status bit drops. */
		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		/* v holds the last status read; still-enabled means timeout. */
		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}
85 
86 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
87 {
88 	unsigned long flags;
89 
90 	spin_lock_irqsave(&host->lock, flags);
91 	host->imask &= ~mask;
92 	writel(host->imask, host->base + MMC_I_MASK);
93 	spin_unlock_irqrestore(&host->lock, flags);
94 }
95 
96 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
97 {
98 	unsigned long flags;
99 
100 	spin_lock_irqsave(&host->lock, flags);
101 	host->imask |= mask;
102 	writel(host->imask, host->base + MMC_I_MASK);
103 	spin_unlock_irqrestore(&host->lock, flags);
104 }
105 
/*
 * Program the controller and build the DMA descriptor chain for a data
 * transfer.  Must be called with the bus clock stopped (see
 * pxamci_request()); the actual transfer starts with the command.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	u32 dcmd;
	int i;

	host->data = data;

	/* Stream (open-ended) transfers: program the maximum block count. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/*
	 * Convert the card's timeout from nanoseconds to MMC clock ticks;
	 * timeout_clks is scaled by the divisor (clkrt) since it is
	 * specified in card-clock cycles.  MMC_RDTO appears to count in
	 * units of 256 clocks, hence the rounding — confirm against the
	 * PXA developer's manual.
	 */
	clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	/*
	 * Route the DMA request line for the active direction only and
	 * set the descriptor command bits: memory side increments, FIFO
	 * side is the flow-control target/source.
	 */
	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
		DRCMRTXMMC = 0;
		DRCMRRXMMC = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
		DRCMRRXMMC = 0;
		DRCMRTXMMC = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	/* One descriptor per mapped segment, FIFO on the device side. */
	for (i = 0; i < host->dma_len; i++) {
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].dcmd = dcmd | sg_dma_len(&data->sg[i]);
		/* Link to the next descriptor in the coherent page. */
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	/* Terminate the chain, then order descriptor writes before start. */
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();

	DDADR(host->dma) = host->sg_dma;
	DCSR(host->dma) = DCSR_RUN;
}
162 
/*
 * Write a command to the controller and start the bus clock.  The clock
 * must already be stopped; completion is signalled by the END_CMD_RES
 * interrupt, which is unmasked last.
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	/* R1b-style commands keep busy signalling on DAT0. */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

/* Strip busy/opcode bits so response types collapse onto R1/R2/R3. */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		/* No response expected. */
		break;
	}

	/* The 32-bit argument is split across two 16-bit registers. */
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	/* Starting the clock kicks off the command. */
	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
196 
197 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
198 {
199 	host->mrq = NULL;
200 	host->cmd = NULL;
201 	host->data = NULL;
202 	mmc_request_done(host->mmc, mrq);
203 }
204 
/*
 * Handle END_CMD_RES: read back the command response and either enable
 * the data-done interrupt (data phase follows) or finish the request.
 * Returns 1 if an in-flight command was handled, 0 otherwise.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		/*
		 * The response FIFO delivers 16-bit words; rebuild each
		 * 32-bit response word from the low byte carried over in
		 * v plus one and a half fresh words, carrying the last
		 * word into the next iteration.
		 */
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
#ifdef CONFIG_PXA27x
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) {
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		} else
#endif
		cmd->error = MMC_ERR_BADCRC;
	}

	/* Command phase done; arm the data phase only on success. */
	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && cmd->error == MMC_ERR_NONE) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
254 
255 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
256 {
257 	struct mmc_data *data = host->data;
258 
259 	if (!data)
260 		return 0;
261 
262 	DCSR(host->dma) = 0;
263 	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
264 		     host->dma_dir);
265 
266 	if (stat & STAT_READ_TIME_OUT)
267 		data->error = MMC_ERR_TIMEOUT;
268 	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
269 		data->error = MMC_ERR_BADCRC;
270 
271 	/*
272 	 * There appears to be a hardware design bug here.  There seems to
273 	 * be no way to find out how much data was transferred to the card.
274 	 * This means that if there was an error on any block, we mark all
275 	 * data blocks as being in error.
276 	 */
277 	if (data->error == MMC_ERR_NONE)
278 		data->bytes_xfered = data->blocks * data->blksz;
279 	else
280 		data->bytes_xfered = 0;
281 
282 	pxamci_disable_irq(host, DATA_TRAN_DONE);
283 
284 	host->data = NULL;
285 	if (host->mrq->stop) {
286 		pxamci_stop_clock(host);
287 		pxamci_start_cmd(host, host->mrq->stop, 0);
288 	} else {
289 		pxamci_finish_request(host, host->mrq);
290 	}
291 
292 	return 1;
293 }
294 
295 static irqreturn_t pxamci_irq(int irq, void *devid)
296 {
297 	struct pxamci_host *host = devid;
298 	unsigned int ireg;
299 	int handled = 0;
300 
301 	ireg = readl(host->base + MMC_I_REG);
302 
303 	if (ireg) {
304 		unsigned stat = readl(host->base + MMC_STAT);
305 
306 		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
307 
308 		if (ireg & END_CMD_RES)
309 			handled |= pxamci_cmd_done(host, stat);
310 		if (ireg & DATA_TRAN_DONE)
311 			handled |= pxamci_data_done(host, stat);
312 	}
313 
314 	return IRQ_RETVAL(handled);
315 }
316 
/*
 * mmc_host_ops.request: start processing an MMC request.  Completion
 * is signalled asynchronously from the interrupt path via
 * pxamci_finish_request().
 */
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	/* The core serializes requests; only one may be in flight. */
	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	/* Registers may only be programmed with the clock stopped. */
	pxamci_stop_clock(host);

	/*
	 * CMDAT_INIT (set at power-on in pxamci_set_ios()) is a one-shot
	 * flag: consume it with this command and clear the sticky copy.
	 */
	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		/* Busy handling conflicts with a data phase. */
		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;

		if (mrq->data->flags & MMC_DATA_STREAM)
			cmdat |= CMDAT_STREAM;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}
345 
346 static int pxamci_get_ro(struct mmc_host *mmc)
347 {
348 	struct pxamci_host *host = mmc_priv(mmc);
349 
350 	if (host->pdata && host->pdata->get_ro)
351 		return host->pdata->get_ro(mmc_dev(mmc));
352 	/* Host doesn't support read only detection so assume writeable */
353 	return 0;
354 }
355 
/*
 * mmc_host_ops.set_ios: apply clock rate and power state changes
 * requested by the MMC core.
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		/*
		 * Compute the clock divisor, doubling it if integer
		 * division rounded the resulting rate above the request;
		 * clkrt is the log2 of the divisor (fls(clk) - 1).
		 */
		unsigned int clk = CLOCKRATE / ios->clock;
		if (CLOCKRATE / clk > ios->clock)
			clk <<= 1;
		host->clkrt = fls(clk) - 1;
		pxa_set_cken(CKEN_MMC, 1);

		/*
		 * we write clkrt on the next command
		 */
	} else {
		/* Zero rate: stop the bus clock and gate the unit clock. */
		pxamci_stop_clock(host);
		pxa_set_cken(CKEN_MMC, 0);
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		if (host->pdata && host->pdata->setpower)
			host->pdata->setpower(mmc_dev(mmc), ios->vdd);

		/* Request the card-init sequence with the next command. */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}
388 
/* Host controller operations registered with the MMC core. */
static const struct mmc_host_ops pxamci_ops = {
	.request	= pxamci_request,
	.get_ro		= pxamci_get_ro,
	.set_ios	= pxamci_set_ios,
};
394 
/*
 * DMA channel interrupt.  The descriptor chain is built without
 * completion-interrupt bits, so this should only fire on error
 * conditions (e.g. a bus error); log it and clear the latched status.
 */
static void pxamci_dma_irq(int dma, void *devid)
{
	printk(KERN_ERR "DMA%d: IRQ???\n", dma);
	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}
400 
401 static irqreturn_t pxamci_detect_irq(int irq, void *devid)
402 {
403 	struct pxamci_host *host = mmc_priv(devid);
404 
405 	mmc_detect_change(devid, host->pdata->detect_delay);
406 	return IRQ_HANDLED;
407 }
408 
/*
 * Probe: claim MMIO/IRQ/DMA resources, allocate and initialise the
 * mmc_host, quiesce the controller and register with the MMC core.
 * All failures unwind through the shared "out" cleanup path.
 */
static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq < 0)
		return -ENXIO;

	/* Claim the register window; r now points at our owned region. */
	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;
	mmc->f_min = CLOCKRATE_MIN;
	mmc->f_max = CLOCKRATE_MAX;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_phys_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is 10 bits.
	 */
	mmc->max_blk_size = 1023;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;		/* marks "no DMA channel" for cleanup */
	host->pdata = pdev->dev.platform_data;
	/* Fall back to a 3.2-3.4V range when the board gives no mask. */
	mmc->ocr_avail = host->pdata ?
			 host->pdata->ocr_mask :
			 MMC_VDD_32_33|MMC_VDD_33_34;

	/* One coherent page holds the whole DMA descriptor chain. */
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;	/* start with everything masked */

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);	/* SPI mode off */
	writel(64, host->base + MMC_RESTO);	/* response timeout */
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	/* Let the board hook up card-detect (pxamci_detect_irq) etc. */
	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	mmc_add_host(mmc);

	return 0;

 out:
	/* Unwind in reverse acquisition order; checks tolerate partial init. */
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}
524 
/*
 * Remove: unregister from the MMC core, mask the controller, tear down
 * DMA routing and release all resources acquired in probe (in reverse
 * order).
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		/* Board-specific teardown (e.g. free the detect IRQ). */
		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		/* Quiesce: stop the clock and mask every interrupt source. */
		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		/* Unroute the MMC DMA request lines. */
		DRCMRRXMMC = 0;
		DRCMRTXMMC = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
558 
559 #ifdef CONFIG_PM
560 static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
561 {
562 	struct mmc_host *mmc = platform_get_drvdata(dev);
563 	int ret = 0;
564 
565 	if (mmc)
566 		ret = mmc_suspend_host(mmc, state);
567 
568 	return ret;
569 }
570 
/*
 * Platform resume hook: let the MMC core re-initialise the card.
 * A missing host (probe never completed) is not an error.
 */
static int pxamci_resume(struct platform_device *dev)
{
	struct mmc_host *mmc = platform_get_drvdata(dev);

	return mmc ? mmc_resume_host(mmc) : 0;
}
581 #else
582 #define pxamci_suspend	NULL
583 #define pxamci_resume	NULL
584 #endif
585 
/* Platform driver glue; .name must match the platform device name. */
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
595 
/* Module entry point: register the platform driver. */
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}
600 
/* Module exit point: unregister the platform driver. */
static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}
605 
606 module_init(pxamci_init);
607 module_exit(pxamci_exit);
608 
609 MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
610 MODULE_LICENSE("GPL");
611