/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

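/*
 * Default maximum card clock frequency, in Hz.  This only provides the
 * default for mmc->f_max below; it can be overridden at load time via the
 * "fmax" module parameter declared at the end of this file.
 */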
static unsigned int fmax = 515633;

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

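	/*
	 * Convert the nanosecond part of the card's data timeout into
	 * card clock cycles.  A rough worked example (values assumed,
	 * not from any datasheet): with cclk = 5 MHz and timeout_ns =
	 * 100000000 (100 ms), clks = 100000000 * 5000000 / 10^9 =
	 * 500000 cycles, which is then added to timeout_clks below.
	 */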
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

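	/*
	 * The block size is programmed into datactrl as log2(blksz),
	 * shifted into the field at bit 4; hence the power-of-two
	 * requirement above.  For example, blksz = 512 gives
	 * blksz_bits = 9.
	 */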
	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
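		/*
		 * Force the DATAEND handling below so that the data
		 * transfer is stopped and the request completed with
		 * the error set above.
		 */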
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(sg_page(host->sg_ptr));
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
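		/*
		 * MMCIFIFOCNT counts 32-bit words still to be
		 * transferred, so the difference from host_remain is
		 * (in bytes) the data already available for reading
		 * from the FIFO.
		 */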
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

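		/*
		 * If the FIFO is completely empty we may burst a full
		 * FIFO's worth of data; the half-empty interrupt only
		 * guarantees room for half a FIFO.
		 */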
		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_page(host->sg_ptr));

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
			mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

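	/*
	 * For reads, arm the data path before the command is sent so the
	 * controller is ready when the card starts returning data; write
	 * data is started later, from mmci_cmd_irq(), once the command
	 * has completed.
	 */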
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;

	if (ios->clock) {
		if (ios->clock >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
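			/*
			 * Card clock is mclk / (2 * (divider + 1)).  As a
			 * worked example (values assumed, not from any
			 * particular board): mclk = 24 MHz with a
			 * requested 400 kHz gives a divider of 29 and a
			 * cclk of exactly 400 kHz.
			 */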
			clk = host->mclk / (2 * ios->clock) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		if (host->hw_designer == 0x80)
			clk |= MCI_FCEN; /* Bug fix in ST IP block */
		clk |= MCI_CLK_ENABLE;
	}

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		/* The ST version does not have this; fall through to POWER_ON */
		if (host->hw_designer != 0x80) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != 0x80)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
};

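/*
 * Timer callback: poll the platform's card detect status about once a
 * second and notify the MMC core when it changes.
 */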
static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status;

	status = host->plat->status(mmc_dev(host->mmc));
	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

static int __devinit mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	/* Bits 12 thru 19 are the designer */
	host->hw_designer = (dev->periphid >> 12) & 0xff;
	/* Bits 20 thru 23 are the revision */
	host->hw_revision = (dev->periphid >> 20) & 0xf;
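	/*
	 * For example, the 0x00041180/0x00041181 IDs matched in mmci_ids
	 * below decode to designer 0x41, while the ST Micro variants
	 * decode to 0x80.
	 */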
	DBG(host, "designer ID = 0x%02x\n", host->hw_designer);
	DBG(host, "revision = 0x%01x\n", host->hw_revision);
	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is at most 100 MHz, so we try to
	 * adjust the clock down to this (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
	}
	host->mmc = mmc;
	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
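	/*
	 * The slowest card clock the divider can generate is
	 * mclk / (2 * 256) = mclk / 512 (rounded up here); the fastest
	 * is mclk itself, capped by the "fmax" module parameter.
	 */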
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
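/*
 * The default fmax above can be overridden when the driver is loaded, for
 * example by passing "fmax=26000000" as a module parameter; it is exposed
 * read-only (0444) through sysfs thereafter.
 */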
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");