xref: /openbmc/linux/drivers/mmc/host/pxamci.c (revision 5d331b7f)
1 /*
2  *  linux/drivers/mmc/host/pxamci.c - PXA MMCI driver
3  *
4  *  Copyright (C) 2003 Russell King, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *  This hardware is really sick:
11  *   - No way to clear interrupts.
12  *   - Have to turn off the clock whenever we touch the device.
13  *   - Doesn't tell you how many data blocks were transferred.
14  *  Yuck!
15  *
16  *	1 and 3 byte data transfers not supported
17  *	max block length up to 1023
18  */
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/platform_device.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/dmaengine.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/clk.h>
28 #include <linux/err.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/slot-gpio.h>
31 #include <linux/io.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/gpio.h>
34 #include <linux/gfp.h>
35 #include <linux/of.h>
36 #include <linux/of_gpio.h>
37 #include <linux/of_device.h>
38 
39 #include <linux/sizes.h>
40 
41 #include <mach/hardware.h>
42 #include <linux/platform_data/mmc-pxamci.h>
43 
44 #include "pxamci.h"
45 
46 #define DRIVER_NAME	"pxa2xx-mci"
47 
48 #define NR_SG	1
49 #define CLKRT_OFF	(~0)
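/*
 * CLKRT_OFF is a software-only sentinel for host->clkrt: it means the
 * divider has not been programmed and the controller clock is gated off.
 * pxamci_set_ios() enables the functional clock when leaving this state
 * and disables it again when re-entering it.
 */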
50 
51 #define mmc_has_26MHz()		(cpu_is_pxa300() || cpu_is_pxa310() \
52 				|| cpu_is_pxa935())
53 
54 struct pxamci_host {
55 	struct mmc_host		*mmc;
56 	spinlock_t		lock;
57 	struct resource		*res;
58 	void __iomem		*base;
59 	struct clk		*clk;
60 	unsigned long		clkrate;
61 	unsigned int		clkrt;
62 	unsigned int		cmdat;
63 	unsigned int		imask;
64 	unsigned int		power_mode;
65 	unsigned long		detect_delay_ms;
66 	struct pxamci_platform_data *pdata;
67 
68 	struct mmc_request	*mrq;
69 	struct mmc_command	*cmd;
70 	struct mmc_data		*data;
71 
72 	struct dma_chan		*dma_chan_rx;
73 	struct dma_chan		*dma_chan_tx;
74 	dma_cookie_t		dma_cookie;
75 	unsigned int		dma_len;
76 	unsigned int		dma_dir;
77 };
78 
79 static int pxamci_init_ocr(struct pxamci_host *host)
80 {
81 	struct mmc_host *mmc = host->mmc;
82 	int ret;
83 
84 	ret = mmc_regulator_get_supply(mmc);
85 	if (ret < 0)
86 		return ret;
87 
88 	if (IS_ERR(mmc->supply.vmmc)) {
89 		/* fall-back to platform data */
90 		mmc->ocr_avail = host->pdata ?
91 			host->pdata->ocr_mask :
92 			MMC_VDD_32_33 | MMC_VDD_33_34;
93 	}
94 
95 	return 0;
96 }
97 
98 static inline int pxamci_set_power(struct pxamci_host *host,
99 				    unsigned char power_mode,
100 				    unsigned int vdd)
101 {
102 	struct mmc_host *mmc = host->mmc;
103 	struct regulator *supply = mmc->supply.vmmc;
104 	int on;
105 
106 	if (!IS_ERR(supply))
107 		return mmc_regulator_set_ocr(mmc, supply, vdd);
108 
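	/*
	 * No usable regulator, so fall back to board glue: test the
	 * requested VDD bit against the board's OCR mask and drive the
	 * power GPIO accordingly; gpio_power_invert flips the polarity
	 * for boards with an active-low power switch.
	 */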
109 	if (host->pdata &&
110 	    gpio_is_valid(host->pdata->gpio_power)) {
111 		on = ((1 << vdd) & host->pdata->ocr_mask);
112 		gpio_set_value(host->pdata->gpio_power,
113 			       !!on ^ host->pdata->gpio_power_invert);
114 	}
115 
116 	if (host->pdata && host->pdata->setpower)
117 		return host->pdata->setpower(mmc_dev(host->mmc), vdd);
118 
119 	return 0;
120 }
121 
122 static void pxamci_stop_clock(struct pxamci_host *host)
123 {
124 	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
125 		unsigned long timeout = 10000;
126 		unsigned int v;
127 
128 		writel(STOP_CLOCK, host->base + MMC_STRPCL);
129 
130 		do {
131 			v = readl(host->base + MMC_STAT);
132 			if (!(v & STAT_CLK_EN))
133 				break;
134 			udelay(1);
135 		} while (timeout--);
136 
137 		if (v & STAT_CLK_EN)
138 			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
139 	}
140 }
141 
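/*
 * MMC_I_MASK is a mask register: a set bit blocks the corresponding
 * interrupt source.  host->imask caches the current mask under host->lock,
 * so "enabling" an interrupt clears its bit and "disabling" sets it before
 * the mask is written back to the controller.
 */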
142 static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
143 {
144 	unsigned long flags;
145 
146 	spin_lock_irqsave(&host->lock, flags);
147 	host->imask &= ~mask;
148 	writel(host->imask, host->base + MMC_I_MASK);
149 	spin_unlock_irqrestore(&host->lock, flags);
150 }
151 
152 static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
153 {
154 	unsigned long flags;
155 
156 	spin_lock_irqsave(&host->lock, flags);
157 	host->imask |= mask;
158 	writel(host->imask, host->base + MMC_I_MASK);
159 	spin_unlock_irqrestore(&host->lock, flags);
160 }
161 
162 static void pxamci_dma_irq(void *param);
163 
164 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
165 {
166 	struct dma_async_tx_descriptor *tx;
167 	enum dma_data_direction direction;
168 	struct dma_slave_config	config;
169 	struct dma_chan *chan;
170 	unsigned int nob = data->blocks;
171 	unsigned long long clks;
172 	unsigned int timeout;
173 	int ret;
174 
175 	host->data = data;
176 
177 	writel(nob, host->base + MMC_NOB);
178 	writel(data->blksz, host->base + MMC_BLKLEN);
179 
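	/*
	 * Convert the nanosecond part of the data timeout into controller
	 * clock cycles, and scale the clock-count part by the current
	 * divider (2^clkrt) since timeout_clks is expressed in card clock
	 * cycles.  The sum is rounded up to the 256-cycle granularity that
	 * the divide-by-256 below assumes for MMC_RDTO.
	 */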
180 	clks = (unsigned long long)data->timeout_ns * host->clkrate;
181 	do_div(clks, 1000000000UL);
182 	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
183 	writel((timeout + 255) / 256, host->base + MMC_RDTO);
184 
185 	memset(&config, 0, sizeof(config));
186 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
187 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
188 	config.src_addr = host->res->start + MMC_RXFIFO;
189 	config.dst_addr = host->res->start + MMC_TXFIFO;
190 	config.src_maxburst = 32;
191 	config.dst_maxburst = 32;
192 
193 	if (data->flags & MMC_DATA_READ) {
194 		host->dma_dir = DMA_FROM_DEVICE;
195 		direction = DMA_DEV_TO_MEM;
196 		chan = host->dma_chan_rx;
197 	} else {
198 		host->dma_dir = DMA_TO_DEVICE;
199 		direction = DMA_MEM_TO_DEV;
200 		chan = host->dma_chan_tx;
201 	}
202 
203 	config.direction = direction;
204 
205 	ret = dmaengine_slave_config(chan, &config);
206 	if (ret < 0) {
207 		dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
208 		return;
209 	}
210 
211 	host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
212 				   host->dma_dir);
213 
214 	tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
215 				     DMA_PREP_INTERRUPT);
216 	if (!tx) {
217 		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
218 		return;
219 	}
220 
221 	if (!(data->flags & MMC_DATA_READ)) {
222 		tx->callback = pxamci_dma_irq;
223 		tx->callback_param = host;
224 	}
225 
226 	host->dma_cookie = dmaengine_submit(tx);
227 
228 	/*
229 	 * workaround for erratum #91:
230 	 * only start DMA now if we are doing a read,
231 	 * otherwise we wait until CMD/RESP has finished
232 	 * before starting DMA.
233 	 */
234 	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
235 		dma_async_issue_pending(chan);
236 }
237 
238 static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
239 {
240 	WARN_ON(host->cmd != NULL);
241 	host->cmd = cmd;
242 
243 	if (cmd->flags & MMC_RSP_BUSY)
244 		cmdat |= CMDAT_BUSY;
245 
246 #define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
247 	switch (RSP_TYPE(mmc_resp_type(cmd))) {
248 	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
249 		cmdat |= CMDAT_RESP_SHORT;
250 		break;
251 	case RSP_TYPE(MMC_RSP_R3):
252 		cmdat |= CMDAT_RESP_R3;
253 		break;
254 	case RSP_TYPE(MMC_RSP_R2):
255 		cmdat |= CMDAT_RESP_R2;
256 		break;
257 	default:
258 		break;
259 	}
260 
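	/*
	 * Issue the command: the 32-bit argument is split across the two
	 * 16-bit argument registers, the divider computed in set_ios() is
	 * latched into MMC_CLKRT, and the controller clock is restarted
	 * before unmasking the end-of-command interrupt.
	 */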
261 	writel(cmd->opcode, host->base + MMC_CMD);
262 	writel(cmd->arg >> 16, host->base + MMC_ARGH);
263 	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
264 	writel(cmdat, host->base + MMC_CMDAT);
265 	writel(host->clkrt, host->base + MMC_CLKRT);
266 
267 	writel(START_CLOCK, host->base + MMC_STRPCL);
268 
269 	pxamci_enable_irq(host, END_CMD_RES);
270 }
271 
272 static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
273 {
274 	host->mrq = NULL;
275 	host->cmd = NULL;
276 	host->data = NULL;
277 	mmc_request_done(host->mmc, mrq);
278 }
279 
280 static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
281 {
282 	struct mmc_command *cmd = host->cmd;
283 	int i;
284 	u32 v;
285 
286 	if (!cmd)
287 		return 0;
288 
289 	host->cmd = NULL;
290 
291 	/*
292 	 * Did I mention this is Sick.  We always need to
293 	 * discard the upper 8 bits of the first 16-bit word.
294 	 */
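	/*
	 * Each 32-bit response word is therefore stitched together from the
	 * byte carried over from the previous 16-bit read plus the next two
	 * reads, realigning the stream after the discarded upper byte of the
	 * first word.
	 */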
295 	v = readl(host->base + MMC_RES) & 0xffff;
296 	for (i = 0; i < 4; i++) {
297 		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
298 		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
299 		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
300 		v = w2;
301 	}
302 
303 	if (stat & STAT_TIME_OUT_RESPONSE) {
304 		cmd->error = -ETIMEDOUT;
305 	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
306 		/*
307 		 * workaround for erratum #42:
308 		 * Intel PXA27x Family Processor Specification Update Rev 001
309 		 * A bogus CRC error can appear if the msb of a 136 bit
310 		 * response is a one.
311 		 */
312 		if (cpu_is_pxa27x() &&
313 		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
314 			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
315 		else
316 			cmd->error = -EILSEQ;
317 	}
318 
319 	pxamci_disable_irq(host, END_CMD_RES);
320 	if (host->data && !cmd->error) {
321 		pxamci_enable_irq(host, DATA_TRAN_DONE);
322 		/*
323 		 * workaround for erratum #91, if doing write
324 		 * enable DMA late
325 		 */
326 		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
327 			dma_async_issue_pending(host->dma_chan_tx);
328 	} else {
329 		pxamci_finish_request(host, host->mrq);
330 	}
331 
332 	return 1;
333 }
334 
335 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
336 {
337 	struct mmc_data *data = host->data;
338 	struct dma_chan *chan;
339 
340 	if (!data)
341 		return 0;
342 
343 	if (data->flags & MMC_DATA_READ)
344 		chan = host->dma_chan_rx;
345 	else
346 		chan = host->dma_chan_tx;
347 	dma_unmap_sg(chan->device->dev,
348 		     data->sg, data->sg_len, host->dma_dir);
349 
350 	if (stat & STAT_READ_TIME_OUT)
351 		data->error = -ETIMEDOUT;
352 	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
353 		data->error = -EILSEQ;
354 
355 	/*
356 	 * There appears to be a hardware design bug here.  There seems to
357 	 * be no way to find out how much data was transferred to the card.
358 	 * This means that if there was an error on any block, we mark all
359 	 * data blocks as being in error.
360 	 */
361 	if (!data->error)
362 		data->bytes_xfered = data->blocks * data->blksz;
363 	else
364 		data->bytes_xfered = 0;
365 
366 	pxamci_disable_irq(host, DATA_TRAN_DONE);
367 
368 	host->data = NULL;
369 	if (host->mrq->stop) {
370 		pxamci_stop_clock(host);
371 		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
372 	} else {
373 		pxamci_finish_request(host, host->mrq);
374 	}
375 
376 	return 1;
377 }
378 
379 static irqreturn_t pxamci_irq(int irq, void *devid)
380 {
381 	struct pxamci_host *host = devid;
382 	unsigned int ireg;
383 	int handled = 0;
384 
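	/* Only service sources that are both asserted and not masked. */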
385 	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
386 
387 	if (ireg) {
388 		unsigned stat = readl(host->base + MMC_STAT);
389 
390 		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
391 
392 		if (ireg & END_CMD_RES)
393 			handled |= pxamci_cmd_done(host, stat);
394 		if (ireg & DATA_TRAN_DONE)
395 			handled |= pxamci_data_done(host, stat);
396 		if (ireg & SDIO_INT) {
397 			mmc_signal_sdio_irq(host->mmc);
398 			handled = 1;
399 		}
400 	}
401 
402 	return IRQ_RETVAL(handled);
403 }
404 
405 static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
406 {
407 	struct pxamci_host *host = mmc_priv(mmc);
408 	unsigned int cmdat;
409 
410 	WARN_ON(host->mrq != NULL);
411 
412 	host->mrq = mrq;
413 
414 	pxamci_stop_clock(host);
415 
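	/*
	 * CMDAT_INIT is requested once by set_ios() at power-on (it tells
	 * the controller to precede the command with the initialization
	 * clock stream); take a snapshot including it for this command and
	 * clear it so it is only ever sent once.
	 */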
416 	cmdat = host->cmdat;
417 	host->cmdat &= ~CMDAT_INIT;
418 
419 	if (mrq->data) {
420 		pxamci_setup_data(host, mrq->data);
421 
422 		cmdat &= ~CMDAT_BUSY;
423 		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
424 		if (mrq->data->flags & MMC_DATA_WRITE)
425 			cmdat |= CMDAT_WRITE;
426 	}
427 
428 	pxamci_start_cmd(host, mrq->cmd, cmdat);
429 }
430 
431 static int pxamci_get_ro(struct mmc_host *mmc)
432 {
433 	struct pxamci_host *host = mmc_priv(mmc);
434 
435 	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
436 		return mmc_gpio_get_ro(mmc);
437 	if (host->pdata && host->pdata->get_ro)
438 		return !!host->pdata->get_ro(mmc_dev(mmc));
439 	/*
440 	 * Board doesn't support read only detection; let the mmc core
441 	 * decide what to do.
442 	 */
443 	return -ENOSYS;
444 }
445 
446 static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
447 {
448 	struct pxamci_host *host = mmc_priv(mmc);
449 
450 	if (ios->clock) {
451 		unsigned long rate = host->clkrate;
452 		unsigned int clk = rate / ios->clock;
453 
454 		if (host->clkrt == CLKRT_OFF)
455 			clk_prepare_enable(host->clk);
456 
457 		if (ios->clock == 26000000) {
458 			/* to support 26MHz */
459 			host->clkrt = 7;
460 		} else {
461 			/* to handle (19.5MHz, 26MHz) */
462 			if (!clk)
463 				clk = 1;
464 
465 			/*
466 			 * Integer division rounds the divisor down, which can
467 			 * give a card clock faster than requested; if so,
468 			 * double it before converting to the MMC_CLKRT exponent.
469 			 */
470 			if (rate / clk > ios->clock)
471 				clk <<= 1;
472 			host->clkrt = fls(clk) - 1;
473 		}
474 
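		/*
		 * host->clkrt is the power-of-two exponent of the divider:
		 * the resulting card clock is host->clkrate >> clkrt.
		 */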
475 		/*
476 		 * we write clkrt on the next command
477 		 */
478 	} else {
479 		pxamci_stop_clock(host);
480 		if (host->clkrt != CLKRT_OFF) {
481 			host->clkrt = CLKRT_OFF;
482 			clk_disable_unprepare(host->clk);
483 		}
484 	}
485 
486 	if (host->power_mode != ios->power_mode) {
487 		int ret;
488 
489 		host->power_mode = ios->power_mode;
490 
491 		ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
492 		if (ret) {
493 			dev_err(mmc_dev(mmc), "unable to set power\n");
494 			/*
495 			 * The .set_ios() function in the mmc_host_ops
496 			 * struct returns void, and failing to set the
497 			 * power should be rare, so we print an error and
498 			 * return here.
499 			 */
500 			return;
501 		}
502 
503 		if (ios->power_mode == MMC_POWER_ON)
504 			host->cmdat |= CMDAT_INIT;
505 	}
506 
507 	if (ios->bus_width == MMC_BUS_WIDTH_4)
508 		host->cmdat |= CMDAT_SD_4DAT;
509 	else
510 		host->cmdat &= ~CMDAT_SD_4DAT;
511 
512 	dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
513 		host->clkrt, host->cmdat);
514 }
515 
516 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
517 {
518 	struct pxamci_host *pxa_host = mmc_priv(host);
519 
520 	if (enable)
521 		pxamci_enable_irq(pxa_host, SDIO_INT);
522 	else
523 		pxamci_disable_irq(pxa_host, SDIO_INT);
524 }
525 
526 static const struct mmc_host_ops pxamci_ops = {
527 	.request		= pxamci_request,
528 	.get_cd			= mmc_gpio_get_cd,
529 	.get_ro			= pxamci_get_ro,
530 	.set_ios		= pxamci_set_ios,
531 	.enable_sdio_irq	= pxamci_enable_sdio_irq,
532 };
533 
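/*
 * DMA completion callback, installed only for write transfers in
 * pxamci_setup_data().  On success it tells the controller that the
 * (possibly partial) transmit buffer has been filled; on a DMA error it
 * fails the data phase immediately.
 */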
534 static void pxamci_dma_irq(void *param)
535 {
536 	struct pxamci_host *host = param;
537 	struct dma_tx_state state;
538 	enum dma_status status;
539 	struct dma_chan *chan;
540 	unsigned long flags;
541 
542 	spin_lock_irqsave(&host->lock, flags);
543 
544 	if (!host->data)
545 		goto out_unlock;
546 
547 	if (host->data->flags & MMC_DATA_READ)
548 		chan = host->dma_chan_rx;
549 	else
550 		chan = host->dma_chan_tx;
551 
552 	status = dmaengine_tx_status(chan, host->dma_cookie, &state);
553 
554 	if (likely(status == DMA_COMPLETE)) {
555 		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
556 	} else {
557 		pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
558 			host->data->flags & MMC_DATA_READ ? "rx" : "tx");
559 		host->data->error = -EIO;
560 		pxamci_data_done(host, 0);
561 	}
562 
563 out_unlock:
564 	spin_unlock_irqrestore(&host->lock, flags);
565 }
566 
567 static irqreturn_t pxamci_detect_irq(int irq, void *devid)
568 {
569 	struct pxamci_host *host = mmc_priv(devid);
570 
571 	mmc_detect_change(devid, msecs_to_jiffies(host->detect_delay_ms));
572 	return IRQ_HANDLED;
573 }
574 
575 #ifdef CONFIG_OF
576 static const struct of_device_id pxa_mmc_dt_ids[] = {
577 	{ .compatible = "marvell,pxa-mmc" },
578 	{ }
579 };
580 
581 MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
582 
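/*
 * Illustrative device-tree node for this binding (a sketch only: the unit
 * address, "reg", "interrupts" and GPIO specifier below are made-up
 * placeholders, not taken from a real board file):
 *
 *	mmc0: mmc@41100000 {
 *		compatible = "marvell,pxa-mmc";
 *		reg = <0x41100000 0x1000>;
 *		interrupts = <23>;
 *		pxa-mmc,detect-delay-ms = <200>;
 *		bus-width = <4>;
 *		cd-gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
 *	};
 *
 * "pxa-mmc,detect-delay-ms" is consumed below; the generic properties
 * (bus-width, cd-gpios, ...) are handled by mmc_of_parse().
 */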
583 static int pxamci_of_init(struct platform_device *pdev,
584 			  struct mmc_host *mmc)
585 {
586 	struct device_node *np = pdev->dev.of_node;
587 	struct pxamci_host *host = mmc_priv(mmc);
588 	u32 tmp;
589 	int ret;
590 
591 	if (!np)
592 		return 0;
593 
594 	/* pxa-mmc specific */
595 	if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
596 		host->detect_delay_ms = tmp;
597 
598 	ret = mmc_of_parse(mmc);
599 	if (ret < 0)
600 		return ret;
601 
602 	return 0;
603 }
604 #else
605 static int pxamci_of_init(struct platform_device *pdev,
606 			  struct mmc_host *mmc)
607 {
608 	return 0;
609 }
610 #endif
611 
612 static int pxamci_probe(struct platform_device *pdev)
613 {
614 	struct mmc_host *mmc;
615 	struct pxamci_host *host = NULL;
616 	struct device *dev = &pdev->dev;
617 	struct resource *r;
618 	int ret, irq;
619 
620 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
621 	irq = platform_get_irq(pdev, 0);
622 	if (irq < 0)
623 		return irq;
624 
625 	mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
626 	if (!mmc) {
627 		ret = -ENOMEM;
628 		goto out;
629 	}
630 
631 	mmc->ops = &pxamci_ops;
632 
633 	/*
634 	 * We can do SG-DMA, but we don't because we never know how much
635 	 * data we successfully wrote to the card.
636 	 */
637 	mmc->max_segs = NR_SG;
638 
639 	/*
640 	 * Our hardware DMA can handle a maximum of one page per SG entry.
641 	 */
642 	mmc->max_seg_size = PAGE_SIZE;
643 
644 	/*
645 	 * Block length register is only 10 bits before PXA27x.
646 	 */
647 	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
648 
649 	/*
650 	 * Block count register is 16 bits.
651 	 */
652 	mmc->max_blk_count = 65535;
653 
654 	ret = pxamci_of_init(pdev, mmc);
655 	if (ret)
656 		goto out;
657 
658 	host = mmc_priv(mmc);
659 	host->mmc = mmc;
660 	host->pdata = pdev->dev.platform_data;
661 	host->clkrt = CLKRT_OFF;
662 
663 	host->clk = devm_clk_get(dev, NULL);
664 	if (IS_ERR(host->clk)) {
665 		ret = PTR_ERR(host->clk);
666 		host->clk = NULL;
667 		goto out;
668 	}
669 
670 	host->clkrate = clk_get_rate(host->clk);
671 
672 	/*
673 	 * Calculate minimum clock rate, rounding up.
674 	 */
675 	mmc->f_min = (host->clkrate + 63) / 64;
676 	mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;
677 
678 	ret = pxamci_init_ocr(host);
679 	if (ret < 0)
680 		goto out;
681 
682 	mmc->caps = 0;
683 	host->cmdat = 0;
684 	if (!cpu_is_pxa25x()) {
685 		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
686 		host->cmdat |= CMDAT_SDIO_INT_EN;
687 		if (mmc_has_26MHz())
688 			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
689 				     MMC_CAP_SD_HIGHSPEED;
690 	}
691 
692 	spin_lock_init(&host->lock);
693 	host->res = r;
694 	host->imask = MMC_I_MASK_ALL;
695 
696 	host->base = devm_ioremap_resource(dev, r);
697 	if (IS_ERR(host->base)) {
698 		ret = PTR_ERR(host->base);
699 		goto out;
700 	}
701 
702 	/*
703 	 * Ensure that the host controller is shut down, and setup
704 	 * with our defaults.
705 	 */
706 	pxamci_stop_clock(host);
707 	writel(0, host->base + MMC_SPI);
708 	writel(64, host->base + MMC_RESTO);
709 	writel(host->imask, host->base + MMC_I_MASK);
710 
711 	ret = devm_request_irq(dev, irq, pxamci_irq, 0,
712 			       DRIVER_NAME, host);
713 	if (ret)
714 		goto out;
715 
716 	platform_set_drvdata(pdev, mmc);
717 
718 	host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
719 	if (host->dma_chan_rx == NULL) {
720 		dev_err(dev, "unable to request rx dma channel\n");
721 		ret = -ENODEV;
722 		goto out;
723 	}
724 
725 	host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
726 	if (host->dma_chan_tx == NULL) {
727 		dev_err(dev, "unable to request tx dma channel\n");
728 		ret = -ENODEV;
729 		goto out;
730 	}
731 
732 	if (host->pdata) {
733 		int gpio_cd = host->pdata->gpio_card_detect;
734 		int gpio_ro = host->pdata->gpio_card_ro;
735 		int gpio_power = host->pdata->gpio_power;
736 
737 		host->detect_delay_ms = host->pdata->detect_delay_ms;
738 
739 		if (gpio_is_valid(gpio_power)) {
740 			ret = devm_gpio_request(dev, gpio_power,
741 						"mmc card power");
742 			if (ret) {
743 				dev_err(dev,
744 					"Failed requesting gpio_power %d\n",
745 					gpio_power);
746 				goto out;
747 			}
748 			gpio_direction_output(gpio_power,
749 					      host->pdata->gpio_power_invert);
750 		}
751 
752 		if (gpio_is_valid(gpio_ro)) {
753 			ret = mmc_gpio_request_ro(mmc, gpio_ro);
754 			if (ret) {
755 				dev_err(dev,
756 					"Failed requesting gpio_ro %d\n",
757 					gpio_ro);
758 				goto out;
759 			} else {
760 				mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
761 					0 : MMC_CAP2_RO_ACTIVE_HIGH;
762 			}
763 		}
764 
765 		if (gpio_is_valid(gpio_cd))
766 			ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
767 		if (ret) {
768 			dev_err(dev, "Failed requesting gpio_cd %d\n",
769 				gpio_cd);
770 			goto out;
771 		}
772 
773 		if (host->pdata->init)
774 			host->pdata->init(dev, pxamci_detect_irq, mmc);
775 
776 		if (gpio_is_valid(gpio_power) && host->pdata->setpower)
777 			dev_warn(dev, "gpio_power and setpower() both defined\n");
778 		if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
779 			dev_warn(dev, "gpio_ro and get_ro() both defined\n");
780 	}
781 
782 	mmc_add_host(mmc);
783 
784 	return 0;
785 
786 out:
787 	if (host) {
788 		if (host->dma_chan_rx)
789 			dma_release_channel(host->dma_chan_rx);
790 		if (host->dma_chan_tx)
791 			dma_release_channel(host->dma_chan_tx);
792 	}
793 	if (mmc)
794 		mmc_free_host(mmc);
795 	return ret;
796 }
797 
798 static int pxamci_remove(struct platform_device *pdev)
799 {
800 	struct mmc_host *mmc = platform_get_drvdata(pdev);
801 
802 	if (mmc) {
803 		struct pxamci_host *host = mmc_priv(mmc);
804 
805 		mmc_remove_host(mmc);
806 
807 		if (host->pdata && host->pdata->exit)
808 			host->pdata->exit(&pdev->dev, mmc);
809 
810 		pxamci_stop_clock(host);
811 		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
812 		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
813 		       host->base + MMC_I_MASK);
814 
815 		dmaengine_terminate_all(host->dma_chan_rx);
816 		dmaengine_terminate_all(host->dma_chan_tx);
817 		dma_release_channel(host->dma_chan_rx);
818 		dma_release_channel(host->dma_chan_tx);
819 
820 		mmc_free_host(mmc);
821 	}
822 
823 	return 0;
824 }
825 
826 static struct platform_driver pxamci_driver = {
827 	.probe		= pxamci_probe,
828 	.remove		= pxamci_remove,
829 	.driver		= {
830 		.name	= DRIVER_NAME,
831 		.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
832 	},
833 };
834 
835 module_platform_driver(pxamci_driver);
836 
837 MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
838 MODULE_LICENSE("GPL");
839 MODULE_ALIAS("platform:pxa2xx-mci");
840