/*
 *  linux/drivers/mmc/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

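/*
 * Upper limit for the card clock in Hz; min(mclk, fmax) becomes the host's
 * f_max.  Exposed read-only as a module parameter (see module_param below).
 */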
static unsigned int fmax = 515633;

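/*
 * Terminate the current request: clear the command register, record how
 * many bytes were transferred, and hand the request back to the MMC core.
 * Called with the host lock held; the lock is dropped around
 * mmc_request_done() since it may call back into the driver.
 */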
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

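/*
 * Disable the data path state machine and the PIO interrupts, and forget
 * the current data transfer.
 */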
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

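/*
 * Program a data transfer: set up the scatterlist walk, the data timeout
 * (timeout_clks plus timeout_ns converted to card clock cycles), the total
 * data length and the data path control register, and unmask the PIO
 * interrupts appropriate for the transfer direction.  The data-end
 * interrupt stays masked until the PIO handler has drained all the data.
 */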
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

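/*
 * Issue a command.  If the command path state machine is still enabled
 * from a previous command, disable it briefly before writing the new
 * argument and command.  The response length flags are derived from
 * cmd->flags.
 */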
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

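/*
 * Handle data-path interrupt status: account completed blocks, map CRC,
 * timeout and FIFO error bits to MMC error codes, and on data end either
 * finish the request or issue the stop command.
 */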
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = MMC_ERR_BADCRC;
		else if (status & MCI_DATATIMEOUT)
			data->error = MMC_ERR_TIMEOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = MMC_ERR_FIFO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(host->sg_ptr->page);
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

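/*
 * Handle command-path interrupt status: latch the response registers,
 * translate timeout and CRC failures into MMC error codes, and either
 * complete the request or start the (write) data phase.
 */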
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = MMC_ERR_BADCRC;
	}

	if (!cmd->data || cmd->error != MMC_ERR_NONE) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

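/*
 * Drain the receive FIFO into the given buffer, at most 'remain' bytes.
 * The number of bytes currently available is derived from the FIFO count
 * register and the number of bytes still outstanding in the transfer.
 * Returns the number of bytes read.
 */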
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;

	do {
		int count = host->size - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

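/*
 * Fill the transmit FIFO from the given buffer, at most 'remain' bytes,
 * writing a full FIFO's worth when it is empty and half a FIFO otherwise.
 * Returns the number of bytes written.
 */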
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(host->sg_ptr->page);

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

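/*
 * Start a new request from the MMC core.  Read data transfers are
 * programmed before the command is issued so the data path is ready as
 * soon as the card starts sending; writes are started from the command
 * completion handler instead.
 */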
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	spin_lock_irq(&host->lock);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irq(&host->lock);
}

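/*
 * Apply bus settings requested by the MMC core.  The card clock is either
 * mclk itself (bypass) or mclk / (2 * (divider + 1)) with the divider
 * clamped to 255, and the power register is built from the platform's
 * vdd translation, the requested power mode and the bus mode.
 */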
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;

	if (ios->clock) {
		if (ios->clock >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * ios->clock) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		clk |= MCI_CLK_ENABLE;
	}

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		pwr |= MCI_PWR_UP;
		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		pwr |= MCI_ROD;

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
};

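/*
 * Timer callback used to poll the platform's card detect status once a
 * second; any change is reported to the MMC core via mmc_detect_change().
 */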
static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status;

	status = host->plat->status(mmc_dev(host->mmc));
	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

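/*
 * Probe an MMCI primecell: claim the MMIO region, enable the interface
 * clock, map the registers, describe the host's transfer limits to the
 * MMC core, hook up the command and PIO interrupts, and start the card
 * detect polling timer.
 */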
static int mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->clk = clk_get(&dev->dev, "MCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	host->mmc = mmc;
	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = MMC_CAP_MULTIWRITE;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

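/*
 * Tear down an MMCI primecell: stop the card detect timer, unregister the
 * host, mask and quiesce the hardware, then release interrupts, mappings,
 * clocks and the bus region.
 */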
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
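/*
 * Suspend: ask the MMC core to suspend the host and, if that succeeds,
 * mask all MMCI interrupts.
 */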
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

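/*
 * Resume: re-enable the interrupt mask and let the MMC core resume the
 * host.
 */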
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");