/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

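/*
 * Default maximum operating frequency in Hz.  It is used when the platform
 * data does not supply f_max and can be overridden through the "fmax"
 * module parameter (see module_param() at the end of this file).
 */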
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
};

static struct variant_data variant_arm = {
	.datalength_bits	= 16,
};

static struct variant_data variant_u300 = {
	.clkreg_enable		= 1 << 13, /* HWFCEN */
	.datalength_bits	= 16,
};

static struct variant_data variant_ux500 = {
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= 1 << 14, /* HWFCEN */
	.datalength_bits	= 24,
};

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
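			/*
			 * The bus clock is mclk / (2 * (divider + 1)), so
			 * solve for the divider and clamp it to the 8-bit
			 * field (a divider of 255 gives mclk / 512).
			 */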
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

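	/*
	 * The data timeout is programmed in card bus clock (cclk) cycles:
	 * convert timeout_ns into cycles and add the timeout_clks the core
	 * has already supplied.
	 */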
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

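	/*
	 * The block size is programmed as log2(blksz) in the DPSM block
	 * size field of MMCIDATACTRL; the BUG_ON enforces the hardware's
	 * power-of-two requirement.
	 */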
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
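	/*
	 * Each MCI_DATABLOCKEND marks one completed block, so advance the
	 * transferred byte count one block at a time.
	 */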
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
#ifdef CONFIG_ARCH_U300
		/*
		 * On the U300 some signal or other is
		 * badly routed so that a data write does
		 * not properly terminate with a MCI_DATAEND
		 * status flag. This quirk will make writes
		 * work again.
		 */
		if (data->flags & MMC_DATA_WRITE)
			status |= MCI_DATAEND;
#endif
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (data->flags & MMC_DATA_READ) {
			struct sg_mapping_iter *sg_miter = &host->sg_miter;
			unsigned long flags;

			local_irq_save(flags);
			if (sg_miter_next(sg_miter)) {
				flush_dcache_page(sg_miter->page);
				sg_miter_stop(sg_miter);
			}
			local_irq_restore(flags);
		}
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

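	/*
	 * Read back all four response registers; commands with a short
	 * response only consume resp[0].
	 */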
	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
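		/*
		 * MMCIFIFOCNT holds the number of words still to be
		 * transferred; the difference from what is left of this
		 * transfer is the number of bytes waiting in the FIFO.
		 */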
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;
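		/*
		 * If the FIFO reports completely empty we can push a full
		 * FIFO's worth of words; otherwise only half of it is
		 * guaranteed to be free.
		 */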
		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

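	/*
	 * The scatterlist iterator was started with SG_MITER_ATOMIC, which
	 * uses kmap_atomic(), so interrupts must stay disabled while its
	 * mappings are in use below.
	 */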
	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_miter->page);

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

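	/*
	 * Read the masked status, acknowledge the bits we are about to
	 * handle, and keep looping until no enabled interrupt bits remain.
	 */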
	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

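	/*
	 * For reads the data path must be set up before the command is
	 * issued so the controller is ready when the card starts sending;
	 * writes are started from mmci_cmd_irq() once the command has
	 * completed.
	 */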
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc &&
		    regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		break;
	case MMC_POWER_UP:
#ifdef CONFIG_REGULATOR
		if (host->vcc)
			/* This implicitly enables the regulator */
			mmc_regulator_set_ocr(host->vcc, ios->vdd);
#endif
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned int status;

	if (host->gpio_cd == -ENOSYS)
		status = host->plat->status(mmc_dev(host->mmc));
	else
		status = !gpio_get_value(host->gpio_cd);

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is at most 100 MHz, so try to
	 * adjust the clock down to this if possible.
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
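	/*
	 * The largest clock divider (255) gives mclk / (2 * 256), i.e.
	 * mclk / 512, so round up to get the minimum usable frequency.
	 */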
	mmc->f_min = (host->mclk + 511) / 512;
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps |= MMC_CAP_NEEDS_POLL;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

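	/*
	 * Mask and clear all interrupt sources before the IRQ handlers
	 * are installed.
	 */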
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc && regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0x00ffffff,
		.data	= &variant_ux500,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
935