xref: /openbmc/linux/drivers/mmc/host/mmci.c (revision 8c11a94d)
1 /*
2  *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3  *
4  *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5  *  Copyright (C) 2010 ST-Ericsson AB.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/ioport.h>
15 #include <linux/device.h>
16 #include <linux/interrupt.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/highmem.h>
20 #include <linux/log2.h>
21 #include <linux/mmc/host.h>
22 #include <linux/mmc/card.h>
23 #include <linux/amba/bus.h>
24 #include <linux/clk.h>
25 #include <linux/scatterlist.h>
26 #include <linux/gpio.h>
27 #include <linux/amba/mmci.h>
28 #include <linux/regulator/consumer.h>
29 
30 #include <asm/div64.h>
31 #include <asm/io.h>
32 #include <asm/sizes.h>
33 
34 #include "mmci.h"
35 
#define DRIVER_NAME "mmci-pl18x"

/*
 * Default maximum card clock frequency in Hz, used as mmc->f_max when
 * the platform data does not supply one.  Overridable at load time via
 * the 'fmax' module parameter.
 */
static unsigned int fmax = 515633;
39 
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
 *		and will not work at all.
 * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
 *		using DMA.
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			broken_blockend;
	bool			broken_blockend_dma;
	bool			sdio;
	bool			st_clkdiv;
};
67 
/* Original ARM PL180/PL181 implementation */
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};
73 
/* ST Micro U300 variant: SDIO-capable, hardware flow control on bit 13 */
static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= 1 << 13, /* HWFCEN */
	.datalength_bits	= 16,
	.broken_blockend_dma	= true,
	.sdio			= true,
};
82 
/* ST Micro Ux500 (DB8500) variant: ST clock divider, blockend IRQ unusable */
static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= 1 << 14, /* HWFCEN */
	.datalength_bits	= 24,
	.broken_blockend	= true,
	.sdio			= true,
	.st_clkdiv		= true,
};
93 
94 /*
95  * This must be called with host->lock held
96  */
97 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
98 {
99 	struct variant_data *variant = host->variant;
100 	u32 clk = variant->clkreg;
101 
102 	if (desired) {
103 		if (desired >= host->mclk) {
104 			clk = MCI_CLK_BYPASS;
105 			host->cclk = host->mclk;
106 		} else if (variant->st_clkdiv) {
107 			/*
108 			 * DB8500 TRM says f = mclk / (clkdiv + 2)
109 			 * => clkdiv = (mclk / f) - 2
110 			 * Round the divider up so we don't exceed the max
111 			 * frequency
112 			 */
113 			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
114 			if (clk >= 256)
115 				clk = 255;
116 			host->cclk = host->mclk / (clk + 2);
117 		} else {
118 			/*
119 			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
120 			 * => clkdiv = mclk / (2 * f) - 1
121 			 */
122 			clk = host->mclk / (2 * desired) - 1;
123 			if (clk >= 256)
124 				clk = 255;
125 			host->cclk = host->mclk / (2 * (clk + 1));
126 		}
127 
128 		clk |= variant->clkreg_enable;
129 		clk |= MCI_CLK_ENABLE;
130 		/* This hasn't proven to be worthwhile */
131 		/* clk |= MCI_CLK_PWRSAVE; */
132 	}
133 
134 	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
135 		clk |= MCI_4BIT_BUS;
136 	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
137 		clk |= MCI_ST_8BIT_BUS;
138 
139 	writel(clk, host->base + MMCICLOCK);
140 }
141 
/*
 * Conclude an MMC request: disable the command state machine, report
 * the number of bytes actually transferred and hand the request back
 * to the MMC core.  Called with host->lock held.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	/* Data phase must already be torn down (see mmci_stop_data()) */
	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
163 
164 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
165 {
166 	void __iomem *base = host->base;
167 
168 	if (host->singleirq) {
169 		unsigned int mask0 = readl(base + MMCIMASK0);
170 
171 		mask0 &= ~MCI_IRQ1MASK;
172 		mask0 |= mask;
173 
174 		writel(mask0, base + MMCIMASK0);
175 	}
176 
177 	writel(mask, base + MMCIMASK1);
178 }
179 
/*
 * Tear down the data path: disable the data state machine, mask all
 * PIO interrupts and forget the in-flight data transfer.
 */
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
186 
187 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
188 {
189 	unsigned int flags = SG_MITER_ATOMIC;
190 
191 	if (data->flags & MMC_DATA_READ)
192 		flags |= SG_MITER_TO_SG;
193 	else
194 		flags |= SG_MITER_FROM_SG;
195 
196 	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
197 }
198 
/*
 * Set up and start a data transfer: program the timeout, length and
 * data-control registers, then unmask the appropriate PIO interrupts.
 * Called with host->lock held.
 */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;
	/* Completion flags consumed by mmci_data_irq() */
	host->blockend = false;
	host->dataend = false;

	mmci_init_sg(host, data);

	/* Convert the nanosecond timeout into card-clock cycles */
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	/* The block size is programmed as log2, so it must be a power of two */
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < variant->fifosize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants has a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	/* Keep DATAEND masked until the PIO path drains (see mmci_pio_irq()) */
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
258 
/*
 * Issue a command on the bus.  If the command state machine is still
 * enabled from a previous command it is disabled first, with a short
 * delay before reprogramming MMCICOMMAND.  Called with host->lock held.
 */
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	/* Dead placeholder: command-pending/interrupt mode is never used */
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
286 
/*
 * Handle data-path interrupt status: record errors, account completed
 * blocks, and conclude the transfer once both the block-end and
 * data-end conditions have been satisfied.  The blockend/dataend flag
 * dance exists because the IRQs arrive in different (sometimes broken)
 * orders on the different hardware variants — see the large comment
 * below.  Called with host->lock held, from mmci_irq().
 */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	struct variant_data *variant = host->variant;

	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;

		/* Force-complete the transaction */
		host->blockend = true;
		host->dataend = true;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (data->flags & MMC_DATA_READ) {
			struct sg_mapping_iter *sg_miter = &host->sg_miter;
			unsigned long flags;

			local_irq_save(flags);
			if (sg_miter_next(sg_miter)) {
				flush_dcache_page(sg_miter->page);
				sg_miter_stop(sg_miter);
			}
			local_irq_restore(flags);
		}
	}

	/*
	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
	 * is always sent first, and we increase the
	 * transfered number of bytes for that IRQ. Then
	 * MCI_DATAEND follows and we conclude the transaction.
	 *
	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
	 * doesn't seem to immediately clear from the status,
	 * so we can't use it keep count when only one irq is
	 * used because the irq will hit for other reasons, and
	 * then the flag is still up. So we use the MCI_DATAEND
	 * IRQ at the end of the entire transfer because
	 * MCI_DATABLOCKEND is broken.
	 *
	 * In the U300, the IRQs can arrive out-of-order,
	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
	 * so for this case we use the flags "blockend" and
	 * "dataend" to make sure both IRQs have arrived before
	 * concluding the transaction. (This does not apply
	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
	 * at all.) In DMA mode it suffers from the same problem
	 * as the Ux500.
	 */
	if (status & MCI_DATABLOCKEND) {
		/*
		 * Just being a little over-cautious, we do not
		 * use this progressive update if the hardware blockend
		 * flag is unreliable: since it can stay high between
		 * IRQs it will corrupt the transfer counter.
		 */
		if (!variant->broken_blockend)
			host->data_xfered += data->blksz;
		host->blockend = true;
	}

	if (status & MCI_DATAEND)
		host->dataend = true;

	/*
	 * On variants with broken blockend we shall only wait for dataend,
	 * on others we must sync with the blockend signal since they can
	 * appear out-of-order.
	 */
	if (host->dataend && (host->blockend || variant->broken_blockend)) {
		mmci_stop_data(host);

		/* Reset these flags */
		host->blockend = false;
		host->dataend = false;

		/*
		 * Variants with broken blockend flags need to handle the
		 * end of the entire transfer here.
		 */
		if (variant->broken_blockend && !data->error)
			host->data_xfered += data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
388 
/*
 * Handle command-path interrupt status.  The response registers are
 * latched unconditionally (harmless for commands without a response),
 * error bits are translated to errno values, and then either the
 * request completes here (error or no data phase) or the data phase
 * is started for write transfers — reads were already armed in
 * mmci_request() before the command was sent.  Called with host->lock
 * held, from mmci_irq().
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		/* CRC errors only matter for commands that carry a CRC */
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
416 
/*
 * Drain the receive FIFO into 'buffer', reading at most 'remain' bytes.
 * Returns the number of bytes copied.  Called from IRQ context.
 */
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		/*
		 * MMCIFIFOCNT appears to count the words still expected by
		 * the transfer, so the bytes currently available in the
		 * FIFO are the transfer remainder minus that (in bytes).
		 * TODO confirm against the PL180 TRM.
		 */
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
447 
448 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
449 {
450 	struct variant_data *variant = host->variant;
451 	void __iomem *base = host->base;
452 	char *ptr = buffer;
453 
454 	do {
455 		unsigned int count, maxcnt;
456 
457 		maxcnt = status & MCI_TXFIFOEMPTY ?
458 			 variant->fifosize : variant->fifohalfsize;
459 		count = min(remain, maxcnt);
460 
461 		/*
462 		 * The ST Micro variant for SDIO transfer sizes
463 		 * less then 8 bytes should have clock H/W flow
464 		 * control disabled.
465 		 */
466 		if (variant->sdio &&
467 		    mmc_card_sdio(host->mmc->card)) {
468 			if (count < 8)
469 				writel(readl(host->base + MMCICLOCK) &
470 					~variant->clkreg_enable,
471 					host->base + MMCICLOCK);
472 			else
473 				writel(readl(host->base + MMCICLOCK) |
474 					variant->clkreg_enable,
475 					host->base + MMCICLOCK);
476 		}
477 
478 		/*
479 		 * SDIO especially may want to send something that is
480 		 * not divisible by 4 (as opposed to card sectors
481 		 * etc), and the FIFO only accept full 32-bit writes.
482 		 * So compensate by adding +3 on the count, a single
483 		 * byte become a 32bit write, 7 bytes will be two
484 		 * 32bit writes etc.
485 		 */
486 		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
487 
488 		ptr += count;
489 		remain -= count;
490 
491 		if (remain == 0)
492 			break;
493 
494 		status = readl(base + MMCISTATUS);
495 	} while (status & MCI_TXFIFOHALFEMPTY);
496 
497 	return ptr - buffer;
498 }
499 
/*
 * PIO data transfer IRQ handler.
 *
 * Shuttles data between the FIFO and the scatterlist via the sg_miter.
 * Local interrupts are disabled across the loop because the iterator
 * was started with SG_MITER_ATOMIC (see mmci_init_sg()).
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		/* Current sg entry not exhausted yet: wait for the next IRQ */
		if (remain)
			break;

		/* Keep the page cache coherent with data landed via PIO */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_miter->page);

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
582 
/*
 * Handle completion of command and data transfers.
 *
 * On single-IRQ variants the PIO handler is also invoked from here
 * whenever any MASK1 (PIO) status bit is pending, since only the first
 * interrupt line is wired up.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			/* PIO bits were handled above; don't process them here */
			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
628 
/*
 * mmc_host_ops .request handler: validate the block size (the hardware
 * encodes it as a power of two, see mmci_start_data()), then issue the
 * command.  For read transfers the data path is programmed before the
 * command is sent.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	/* The core should never issue a second request before completion */
	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
655 
/*
 * mmc_host_ops .set_ios handler: apply the power mode, bus mode
 * (open-drain vs push-pull) and clock frequency requested by the core.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		/* NOTE(review): 'ret' is ignored here — a power-off failure
		 * is silently dropped; confirm this is intentional. */
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct return void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
		/* fallthrough (ST Micro variants only) */
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	/* Only touch the power register when the value actually changes */
	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
718 
719 static int mmci_get_ro(struct mmc_host *mmc)
720 {
721 	struct mmci_host *host = mmc_priv(mmc);
722 
723 	if (host->gpio_wp == -ENOSYS)
724 		return -ENOSYS;
725 
726 	return gpio_get_value_cansleep(host->gpio_wp);
727 }
728 
729 static int mmci_get_cd(struct mmc_host *mmc)
730 {
731 	struct mmci_host *host = mmc_priv(mmc);
732 	struct mmci_platform_data *plat = host->plat;
733 	unsigned int status;
734 
735 	if (host->gpio_cd == -ENOSYS) {
736 		if (!plat->status)
737 			return 1; /* Assume always present */
738 
739 		status = plat->status(mmc_dev(host->mmc));
740 	} else
741 		status = !!gpio_get_value_cansleep(host->gpio_cd)
742 			^ plat->cd_invert;
743 
744 	/*
745 	 * Use positive logic throughout - status is zero for no card,
746 	 * non-zero for card inserted.
747 	 */
748 	return status;
749 }
750 
751 static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
752 {
753 	struct mmci_host *host = dev_id;
754 
755 	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
756 
757 	return IRQ_HANDLED;
758 }
759 
/* Host operations exposed to the MMC core */
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
766 
/*
 * Probe an MMCI/PL18x AMBA device: map the registers, set up the
 * clock, the optional vmmc regulator and card-detect/write-protect
 * GPIOs, request the IRQ(s) and register the mmc host.  Errors unwind
 * through the goto labels at the bottom, releasing resources in
 * reverse order of acquisition.
 */
static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	unsigned int mask;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -ENOSYS means "no GPIO configured" throughout this driver */
	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/* The divider reaches at best mclk/512 (see mmci_set_clkreg()) */
	mmc->f_min = (host->mclk + 511) / 512;
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		/* NOTE(review): this 'mask' shadows the outer variable;
		 * harmless here, but confusing to read. */
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	/* Mask and clear all interrupts before requesting the IRQs */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/* Failure here is non-fatal: we fall back to polling below */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq, 0,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	/* Poll for the card when detection exists but no CD IRQ is wired */
	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	mask = MCI_IRQENABLE;
	/* Don't use the datablockend flag if it's broken */
	if (variant->broken_blockend)
		mask &= ~MCI_DATABLOCKEND;

	writel(mask, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
		mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	mmc_add_host(mmc);

	return 0;

	/* Error path: release resources in reverse order of acquisition */
 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
994 
/*
 * Remove an MMCI device: unregister the host, quiesce the hardware
 * (mask interrupts, disable the command and data state machines), then
 * release IRQs, GPIOs, mapping, clock and regulator.
 */
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		/* Silence the hardware before tearing anything down */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
1038 
1039 #ifdef CONFIG_PM
/*
 * Suspend: let the MMC core quiesce the host first, then mask all
 * controller interrupts so nothing fires while suspended.
 */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}
1055 
/*
 * Resume: re-enable the interrupt mask cleared in mmci_suspend(), then
 * let the MMC core bring the host back up.
 */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
1071 #else
1072 #define mmci_suspend	NULL
1073 #define mmci_resume	NULL
1074 #endif
1075 
/* AMBA peripheral IDs this driver binds to, each with its quirk table */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180, /* ARM PL180 */
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x00041181, /* ARM PL181 */
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0x00ffffff,
		.data	= &variant_ux500,
	},
	{ 0, 0 },
};
1105 
/* AMBA bus driver glue */
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};
1116 
/* Module entry point: register the AMBA driver */
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}
1121 
/* Module exit point: unregister the AMBA driver */
static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}
1126 
module_init(mmci_init);
module_exit(mmci_exit);
/* 'fmax' is read-only in sysfs (0444); consumed in mmci_probe() */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
1133