xref: /openbmc/linux/drivers/mmc/host/pxamci.c (revision e8e0929d)
1 /*
2  *  linux/drivers/mmc/host/pxa.c - PXA MMCI driver
3  *
4  *  Copyright (C) 2003 Russell King, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  This hardware is really sick:
11  *   - No way to clear interrupts.
12  *   - Have to turn off the clock whenever we touch the device.
13  *   - Doesn't tell you how many data blocks were transferred.
14  *  Yuck!
15  *
16  *	1 and 3 byte data transfers not supported
17  *	max block length up to 1023
18  */
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/platform_device.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/clk.h>
27 #include <linux/err.h>
28 #include <linux/mmc/host.h>
29 #include <linux/io.h>
30 #include <linux/regulator/consumer.h>
31 #include <linux/gpio.h>
32 
33 #include <asm/sizes.h>
34 
35 #include <mach/hardware.h>
36 #include <mach/dma.h>
37 #include <mach/mmc.h>
38 
39 #include "pxamci.h"
40 
41 #define DRIVER_NAME	"pxa2xx-mci"
42 
43 #define NR_SG	1
44 #define CLKRT_OFF	(~0)
45 
/* Per-controller driver state for the PXA MMC host. */
struct pxamci_host {
	struct mmc_host		*mmc;		/* back-pointer to the MMC core host */
	spinlock_t		lock;		/* serialises imask shadow updates */
	struct resource		*res;		/* MMIO region; start also used for FIFO bus addresses */
	void __iomem		*base;		/* ioremapped controller registers */
	struct clk		*clk;		/* controller functional clock */
	unsigned long		clkrate;	/* cached clk_get_rate() of @clk */
	int			irq;		/* controller interrupt line */
	int			dma;		/* PXA DMA channel number, -1 while unallocated */
	unsigned int		clkrt;		/* MMC_CLKRT value; CLKRT_OFF while clock gated */
	unsigned int		cmdat;		/* CMDAT_* flags carried into the next command */
	unsigned int		imask;		/* shadow of the MMC_I_MASK register */
	unsigned int		power_mode;	/* last ios->power_mode seen in set_ios */
	struct pxamci_platform_data *pdata;	/* board hooks: GPIOs, ocr_mask, setpower, ... */

	struct mmc_request	*mrq;		/* request currently in flight */
	struct mmc_command	*cmd;		/* command currently in flight */
	struct mmc_data		*data;		/* data transfer currently in flight */

	dma_addr_t		sg_dma;		/* bus address of the DMA descriptor page */
	struct pxa_dma_desc	*sg_cpu;	/* CPU view of the DMA descriptor page */
	unsigned int		dma_len;	/* number of mapped scatterlist entries */

	unsigned int		dma_dir;	/* DMA_{FROM,TO}_DEVICE for the current transfer */
	unsigned int		dma_drcmrrx;	/* DRCMR index for MMC receive requests */
	unsigned int		dma_drcmrtx;	/* DRCMR index for MMC transmit requests */

	struct regulator	*vcc;		/* optional "vmmc" supply; NULL when unused */
};
75 
/*
 * Determine the OCR voltage mask advertised to the MMC core: prefer a
 * "vmmc" regulator when one is available, otherwise fall back to the
 * platform data's ocr_mask (or a 3.2V-3.4V default).
 */
static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");

	/* Treat a missing/failed regulator as "no regulator" and fall back. */
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
		if (host->pdata && host->pdata->ocr_mask)
			dev_warn(mmc_dev(host->mmc),
				"ocr_mask/setpower will not be used\n");
	}
#endif
	/* NULL here when CONFIG_REGULATOR is off (host state presumably
	 * zeroed at allocation — see pxamci_probe()/mmc_alloc_host). */
	if (host->vcc == NULL) {
		/* fall-back to platform data */
		host->mmc->ocr_avail = host->pdata ?
			host->pdata->ocr_mask :
			MMC_VDD_32_33 | MMC_VDD_33_34;
	}
}
97 
/*
 * Apply a new supply setting.  @vdd is an OCR bit index (as used by the
 * MMC core; see the "(1 << vdd) & ocr_mask" test below).  The regulator,
 * the power GPIO and the board setpower() hook are mutually exclusive
 * with the regulator, but GPIO and setpower() may both run.
 */
static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
{
	int on;

#ifdef CONFIG_REGULATOR
	if (host->vcc)
		mmc_regulator_set_ocr(host->vcc, vdd);
#endif
	/* No regulator: drive the power GPIO according to the OCR mask. */
	if (!host->vcc && host->pdata &&
	    gpio_is_valid(host->pdata->gpio_power)) {
		on = ((1 << vdd) & host->pdata->ocr_mask);
		gpio_set_value(host->pdata->gpio_power,
			       !!on ^ host->pdata->gpio_power_invert);
	}
	/* Board-specific hook, may coexist with the GPIO above. */
	if (!host->vcc && host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}
115 
116 static void pxamci_stop_clock(struct pxamci_host *host)
117 {
118 	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
119 		unsigned long timeout = 10000;
120 		unsigned int v;
121 
122 		writel(STOP_CLOCK, host->base + MMC_STRPCL);
123 
124 		do {
125 			v = readl(host->base + MMC_STAT);
126 			if (!(v & STAT_CLK_EN))
127 				break;
128 			udelay(1);
129 		} while (timeout--);
130 
131 		if (v & STAT_CLK_EN)
132 			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
133 	}
134 }
135 
136 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
137 {
138 	unsigned long flags;
139 
140 	spin_lock_irqsave(&host->lock, flags);
141 	host->imask &= ~mask;
142 	writel(host->imask, host->base + MMC_I_MASK);
143 	spin_unlock_irqrestore(&host->lock, flags);
144 }
145 
146 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
147 {
148 	unsigned long flags;
149 
150 	spin_lock_irqsave(&host->lock, flags);
151 	host->imask |= mask;
152 	writel(host->imask, host->base + MMC_I_MASK);
153 	spin_unlock_irqrestore(&host->lock, flags);
154 }
155 
/*
 * Program block count/length and the read timeout, then build the PXA
 * DMA descriptor chain for @data.  DMA is started here for reads; for
 * writes on PXA27x the start is deferred to pxamci_cmd_done() as a
 * workaround for erratum #91.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	bool dalgn = 0;
	u32 dcmd;
	int i;

	host->data = data;

	/* Stream transfers have no block count; program the maximum. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/* Convert the ns timeout to clock ticks, written in 256-tick units
	 * (rounded up). */
	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	/* Route the DMA request line for the active direction only. */
	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		DRCMR(host->dma_drcmrtx) = 0;
		DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	/* One DMA descriptor per mapped scatterlist entry, chained in order. */
	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
		/* Interrupt on writes that are not a multiple of 32 bytes so
		 * pxamci_dma_irq() can push the partial FIFO buffer out
		 * (it writes BUF_PART_FULL on DCSR_ENDINTR). */
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		/* Not aligned to 8-byte boundary? */
		if (sg_dma_address(&data->sg[i]) & 0x7)
			dalgn = 1;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	/* Terminate the chain on the last descriptor. */
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	/* Descriptors must be visible in memory before the engine runs. */
	wmb();

	/*
	 * The PXA27x DMA controller encounters overhead when working with
	 * unaligned (to 8-byte boundaries) data, so switch on byte alignment
	 * mode only if we have unaligned data.
	 */
	if (dalgn)
		DALGN |= (1 << host->dma);
	else
		DALGN &= ~(1 << host->dma);
	DDADR(host->dma) = host->sg_dma;

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		DCSR(host->dma) = DCSR_RUN;
}
236 
/*
 * Latch @cmd into the controller and (re)start the bus clock.  @cmdat
 * supplies additional CMDAT_* flags from the caller (data phase, init
 * sequence, bus width); the response-format bits are derived here from
 * the command's expected response type.
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

/* strip busy/opcode bits so e.g. R1 and R1b compare equal below */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		/* no response expected */
		break;
	}

	/* The 32-bit argument is split across two 16-bit registers. */
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	/* Completion is signalled via the END_CMD_RES interrupt. */
	pxamci_enable_irq(host, END_CMD_RES);
}
270 
271 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
272 {
273 	host->mrq = NULL;
274 	host->cmd = NULL;
275 	host->data = NULL;
276 	mmc_request_done(host->mmc, mrq);
277 }
278 
/*
 * END_CMD_RES handler: unpack the response FIFO into cmd->resp[],
 * translate status bits into errors (including the PXA27x erratum #42
 * workaround for bogus CRC errors on 136-bit responses), then either
 * arm the data phase or complete the request.
 *
 * Returns nonzero when a command was actually pending.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	/* Re-pack the 16-bit FIFO words into four 32-bit response words. */
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/*
		 * workaround for erratum #91, if doing write
		 * enable DMA late
		 */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			DCSR(host->dma) = DCSR_RUN;
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
333 
/*
 * DATA_TRAN_DONE handler: stop the DMA channel, unmap the scatterlist,
 * translate status bits into errors and finish with either the stop
 * command or request completion.
 *
 * Returns nonzero when a data transfer was actually pending.
 */
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;

	if (!data)
		return 0;

	/* Quiesce the DMA channel before unmapping the buffers. */
	DCSR(host->dma) = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	/* An open-ended transfer needs its stop command issued now. */
	if (host->mrq->stop) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
373 
374 static irqreturn_t pxamci_irq(int irq, void *devid)
375 {
376 	struct pxamci_host *host = devid;
377 	unsigned int ireg;
378 	int handled = 0;
379 
380 	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
381 
382 	if (ireg) {
383 		unsigned stat = readl(host->base + MMC_STAT);
384 
385 		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
386 
387 		if (ireg & END_CMD_RES)
388 			handled |= pxamci_cmd_done(host, stat);
389 		if (ireg & DATA_TRAN_DONE)
390 			handled |= pxamci_data_done(host, stat);
391 		if (ireg & SDIO_INT) {
392 			mmc_signal_sdio_irq(host->mmc);
393 			handled = 1;
394 		}
395 	}
396 
397 	return IRQ_RETVAL(handled);
398 }
399 
400 static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
401 {
402 	struct pxamci_host *host = mmc_priv(mmc);
403 	unsigned int cmdat;
404 
405 	WARN_ON(host->mrq != NULL);
406 
407 	host->mrq = mrq;
408 
409 	pxamci_stop_clock(host);
410 
411 	cmdat = host->cmdat;
412 	host->cmdat &= ~CMDAT_INIT;
413 
414 	if (mrq->data) {
415 		pxamci_setup_data(host, mrq->data);
416 
417 		cmdat &= ~CMDAT_BUSY;
418 		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
419 		if (mrq->data->flags & MMC_DATA_WRITE)
420 			cmdat |= CMDAT_WRITE;
421 
422 		if (mrq->data->flags & MMC_DATA_STREAM)
423 			cmdat |= CMDAT_STREAM;
424 	}
425 
426 	pxamci_start_cmd(host, mrq->cmd, cmdat);
427 }
428 
429 static int pxamci_get_ro(struct mmc_host *mmc)
430 {
431 	struct pxamci_host *host = mmc_priv(mmc);
432 
433 	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
434 		if (host->pdata->gpio_card_ro_invert)
435 			return !gpio_get_value(host->pdata->gpio_card_ro);
436 		else
437 			return gpio_get_value(host->pdata->gpio_card_ro);
438 	}
439 	if (host->pdata && host->pdata->get_ro)
440 		return !!host->pdata->get_ro(mmc_dev(mmc));
441 	/*
442 	 * Board doesn't support read only detection; let the mmc core
443 	 * decide what to do.
444 	 */
445 	return -ENOSYS;
446 }
447 
/*
 * Apply MMC core settings: compute the MMC_CLKRT divisor (gating the
 * controller clock while the bus clock is off), forward power-mode
 * changes and track the 1-/4-bit bus width in host->cmdat.  The new
 * clkrt value only reaches hardware on the next pxamci_start_cmd().
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		/* re-enable the controller clock if it was gated */
		if (host->clkrt == CLKRT_OFF)
			clk_enable(host->clk);

		if (ios->clock == 26000000) {
			/* to support 26MHz on pxa300/pxa310 */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire.  check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
			/* CLKRT encodes the divisor as log2(clk) */
			host->clkrt = fls(clk) - 1;
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		host->power_mode = ios->power_mode;

		pxamci_set_power(host, ios->vdd);

		/* request init behaviour for the next command only —
		 * the flag is consumed one-shot in pxamci_request() */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
		 host->clkrt, host->cmdat);
}
505 
506 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
507 {
508 	struct pxamci_host *pxa_host = mmc_priv(host);
509 
510 	if (enable)
511 		pxamci_enable_irq(pxa_host, SDIO_INT);
512 	else
513 		pxamci_disable_irq(pxa_host, SDIO_INT);
514 }
515 
/* Host operations exported to the MMC core. */
static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};
522 
523 static void pxamci_dma_irq(int dma, void *devid)
524 {
525 	struct pxamci_host *host = devid;
526 	int dcsr = DCSR(dma);
527 	DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
528 
529 	if (dcsr & DCSR_ENDINTR) {
530 		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
531 	} else {
532 		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
533 		       mmc_hostname(host->mmc), dma, dcsr);
534 		host->data->error = -EIO;
535 		pxamci_data_done(host, 0);
536 	}
537 }
538 
539 static irqreturn_t pxamci_detect_irq(int irq, void *devid)
540 {
541 	struct pxamci_host *host = mmc_priv(devid);
542 
543 	mmc_detect_change(devid, host->pdata->detect_delay);
544 	return IRQ_HANDLED;
545 }
546 
547 static int pxamci_probe(struct platform_device *pdev)
548 {
549 	struct mmc_host *mmc;
550 	struct pxamci_host *host = NULL;
551 	struct resource *r, *dmarx, *dmatx;
552 	int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
553 
554 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
555 	irq = platform_get_irq(pdev, 0);
556 	if (!r || irq < 0)
557 		return -ENXIO;
558 
559 	r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
560 	if (!r)
561 		return -EBUSY;
562 
563 	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
564 	if (!mmc) {
565 		ret = -ENOMEM;
566 		goto out;
567 	}
568 
569 	mmc->ops = &pxamci_ops;
570 
571 	/*
572 	 * We can do SG-DMA, but we don't because we never know how much
573 	 * data we successfully wrote to the card.
574 	 */
575 	mmc->max_phys_segs = NR_SG;
576 
577 	/*
578 	 * Our hardware DMA can handle a maximum of one page per SG entry.
579 	 */
580 	mmc->max_seg_size = PAGE_SIZE;
581 
582 	/*
583 	 * Block length register is only 10 bits before PXA27x.
584 	 */
585 	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
586 
587 	/*
588 	 * Block count register is 16 bits.
589 	 */
590 	mmc->max_blk_count = 65535;
591 
592 	host = mmc_priv(mmc);
593 	host->mmc = mmc;
594 	host->dma = -1;
595 	host->pdata = pdev->dev.platform_data;
596 	host->clkrt = CLKRT_OFF;
597 
598 	host->clk = clk_get(&pdev->dev, NULL);
599 	if (IS_ERR(host->clk)) {
600 		ret = PTR_ERR(host->clk);
601 		host->clk = NULL;
602 		goto out;
603 	}
604 
605 	host->clkrate = clk_get_rate(host->clk);
606 
607 	/*
608 	 * Calculate minimum clock rate, rounding up.
609 	 */
610 	mmc->f_min = (host->clkrate + 63) / 64;
611 	mmc->f_max = (cpu_is_pxa300() || cpu_is_pxa310()) ? 26000000
612 							  : host->clkrate;
613 
614 	pxamci_init_ocr(host);
615 
616 	mmc->caps = 0;
617 	host->cmdat = 0;
618 	if (!cpu_is_pxa25x()) {
619 		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
620 		host->cmdat |= CMDAT_SDIO_INT_EN;
621 		if (cpu_is_pxa300() || cpu_is_pxa310())
622 			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
623 				     MMC_CAP_SD_HIGHSPEED;
624 	}
625 
626 	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
627 	if (!host->sg_cpu) {
628 		ret = -ENOMEM;
629 		goto out;
630 	}
631 
632 	spin_lock_init(&host->lock);
633 	host->res = r;
634 	host->irq = irq;
635 	host->imask = MMC_I_MASK_ALL;
636 
637 	host->base = ioremap(r->start, SZ_4K);
638 	if (!host->base) {
639 		ret = -ENOMEM;
640 		goto out;
641 	}
642 
643 	/*
644 	 * Ensure that the host controller is shut down, and setup
645 	 * with our defaults.
646 	 */
647 	pxamci_stop_clock(host);
648 	writel(0, host->base + MMC_SPI);
649 	writel(64, host->base + MMC_RESTO);
650 	writel(host->imask, host->base + MMC_I_MASK);
651 
652 	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
653 				    pxamci_dma_irq, host);
654 	if (host->dma < 0) {
655 		ret = -EBUSY;
656 		goto out;
657 	}
658 
659 	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
660 	if (ret)
661 		goto out;
662 
663 	platform_set_drvdata(pdev, mmc);
664 
665 	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
666 	if (!dmarx) {
667 		ret = -ENXIO;
668 		goto out;
669 	}
670 	host->dma_drcmrrx = dmarx->start;
671 
672 	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
673 	if (!dmatx) {
674 		ret = -ENXIO;
675 		goto out;
676 	}
677 	host->dma_drcmrtx = dmatx->start;
678 
679 	if (host->pdata) {
680 		gpio_cd = host->pdata->gpio_card_detect;
681 		gpio_ro = host->pdata->gpio_card_ro;
682 		gpio_power = host->pdata->gpio_power;
683 	}
684 	if (gpio_is_valid(gpio_power)) {
685 		ret = gpio_request(gpio_power, "mmc card power");
686 		if (ret) {
687 			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
688 			goto out;
689 		}
690 		gpio_direction_output(gpio_power,
691 				      host->pdata->gpio_power_invert);
692 	}
693 	if (gpio_is_valid(gpio_ro)) {
694 		ret = gpio_request(gpio_ro, "mmc card read only");
695 		if (ret) {
696 			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_power);
697 			goto err_gpio_ro;
698 		}
699 		gpio_direction_input(gpio_ro);
700 	}
701 	if (gpio_is_valid(gpio_cd)) {
702 		ret = gpio_request(gpio_cd, "mmc card detect");
703 		if (ret) {
704 			dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_power);
705 			goto err_gpio_cd;
706 		}
707 		gpio_direction_input(gpio_cd);
708 
709 		ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
710 				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
711 				  "mmc card detect", mmc);
712 		if (ret) {
713 			dev_err(&pdev->dev, "failed to request card detect IRQ\n");
714 			goto err_request_irq;
715 		}
716 	}
717 
718 	if (host->pdata && host->pdata->init)
719 		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
720 
721 	if (gpio_is_valid(gpio_power) && host->pdata->setpower)
722 		dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
723 	if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
724 		dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
725 
726 	mmc_add_host(mmc);
727 
728 	return 0;
729 
730 err_request_irq:
731 	gpio_free(gpio_cd);
732 err_gpio_cd:
733 	gpio_free(gpio_ro);
734 err_gpio_ro:
735 	gpio_free(gpio_power);
736  out:
737 	if (host) {
738 		if (host->dma >= 0)
739 			pxa_free_dma(host->dma);
740 		if (host->base)
741 			iounmap(host->base);
742 		if (host->sg_cpu)
743 			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
744 		if (host->clk)
745 			clk_put(host->clk);
746 	}
747 	if (mmc)
748 		mmc_free_host(mmc);
749 	release_resource(r);
750 	return ret;
751 }
752 
/*
 * Teardown, roughly the mirror image of probe.  NOTE(review): the
 * ordering looks deliberate — the host is removed from the MMC core
 * before the IRQ/DMA/clock resources it may still be using go away.
 */
static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		if (host->pdata) {
			gpio_cd = host->pdata->gpio_card_detect;
			gpio_ro = host->pdata->gpio_card_ro;
			gpio_power = host->pdata->gpio_power;
		}
		if (gpio_is_valid(gpio_cd)) {
			free_irq(gpio_to_irq(gpio_cd), mmc);
			gpio_free(gpio_cd);
		}
		if (gpio_is_valid(gpio_ro))
			gpio_free(gpio_ro);
		if (gpio_is_valid(gpio_power))
			gpio_free(gpio_power);
		if (host->vcc)
			regulator_put(host->vcc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		mmc_remove_host(mmc);

		pxamci_stop_clock(host);
		/* mask every controller interrupt source */
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		/* detach the MMC request lines from our DMA channel */
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		clk_put(host->clk);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}
805 
806 #ifdef CONFIG_PM
807 static int pxamci_suspend(struct device *dev)
808 {
809 	struct mmc_host *mmc = dev_get_drvdata(dev);
810 	int ret = 0;
811 
812 	if (mmc)
813 		ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
814 
815 	return ret;
816 }
817 
/* System-resume hook: delegate to the MMC core's resume handling. */
static int pxamci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	if (!mmc)
		return 0;

	return mmc_resume_host(mmc);
}
828 
829 static struct dev_pm_ops pxamci_pm_ops = {
830 	.suspend	= pxamci_suspend,
831 	.resume		= pxamci_resume,
832 };
833 #endif
834 
/*
 * Platform driver glue; the name must match the platform device
 * registered by board/SoC code ("pxa2xx-mci", see MODULE_ALIAS).
 */
static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &pxamci_pm_ops,
#endif
	},
};
846 
/* Module entry point: register the platform driver. */
static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}
851 
/* Module exit point: unregister the platform driver. */
static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}
856 
857 module_init(pxamci_init);
858 module_exit(pxamci_exit);
859 
860 MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
861 MODULE_LICENSE("GPL");
862 MODULE_ALIAS("platform:pxa2xx-mci");
863