/*
 *  linux/drivers/mmc/host/pxamci.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *  Yuck!
 *
 *	1 and 3 byte data transfers not supported
 *	max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/sizes.h>

#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>

#include "pxamci.h"

#define DRIVER_NAME	"pxa2xx-mci"

#define NR_SG	1
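/*
 * CLKRT_OFF marks the bus clock as gated off; a real CLKRT divider value
 * is programmed again on the next command (see pxamci_set_ios()).
 */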
#define CLKRT_OFF	(~0)

#define mmc_has_26MHz()		(cpu_is_pxa300() || cpu_is_pxa310() \
				|| cpu_is_pxa935())

struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;
	struct resource		*res;
	void __iomem		*base;
	struct clk		*clk;
	unsigned long		clkrate;
	unsigned int		clkrt;
	unsigned int		cmdat;
	unsigned int		imask;
	unsigned int		power_mode;
	unsigned long		detect_delay_ms;
	bool			use_ro_gpio;
	struct gpio_desc	*power;
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*dma_chan_tx;
	dma_cookie_t		dma_cookie;
	unsigned int		dma_len;
	unsigned int		dma_dir;
};

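/*
 * Work out the supported OCR range: prefer a vmmc regulator if one is
 * described, otherwise fall back to the platform data mask or a plain
 * 3.2V-3.4V default.
 */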
static int pxamci_init_ocr(struct pxamci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret < 0)
		return ret;

	if (IS_ERR(mmc->supply.vmmc)) {
		/* fall-back to platform data */
		mmc->ocr_avail = host->pdata ?
			host->pdata->ocr_mask :
			MMC_VDD_32_33 | MMC_VDD_33_34;
	}

	return 0;
}

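/*
 * Power control: prefer a vmmc regulator; without one, drive the optional
 * "power" GPIO and/or the legacy platform setpower() hook.
 */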
static inline int pxamci_set_power(struct pxamci_host *host,
				    unsigned char power_mode,
				    unsigned int vdd)
{
	struct mmc_host *mmc = host->mmc;
	struct regulator *supply = mmc->supply.vmmc;

	if (!IS_ERR(supply))
		return mmc_regulator_set_ocr(mmc, supply, vdd);

	if (host->power) {
		bool on = !!((1 << vdd) & host->pdata->ocr_mask);
		gpiod_set_value(host->power, on);
	}

	if (host->pdata && host->pdata->setpower)
		return host->pdata->setpower(mmc_dev(host->mmc), vdd);

	return 0;
}

static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}

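/*
 * MMC_I_MASK is a mask register: a set bit blocks the corresponding
 * interrupt source.  "Enabling" an interrupt therefore clears bits in the
 * cached imask, and "disabling" sets them.
 */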
static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_dma_irq(void *param);

static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_transfer_direction direction;
	struct dma_slave_config	config;
	struct dma_chan *chan;
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	int ret;

	host->data = data;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

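	/*
	 * Convert the card's data timeout into controller clock ticks:
	 * timeout_ns scaled by the base clock rate, plus timeout_clks
	 * scaled up by the current divider (2^clkrt).  MMC_RDTO appears
	 * to count in units of 256 clocks, hence the rounded-up division.
	 */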
	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = host->res->start + MMC_RXFIFO;
	config.dst_addr = host->res->start + MMC_TXFIFO;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		chan = host->dma_chan_rx;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		chan = host->dma_chan_tx;
	}

	config.direction = direction;

	ret = dmaengine_slave_config(chan, &config);
	if (ret < 0) {
		dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
		return;
	}

	host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
				   host->dma_dir);

	tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		return;
	}

	if (!(data->flags & MMC_DATA_READ)) {
		tx->callback = pxamci_dma_irq;
		tx->callback_param = host;
	}

	host->dma_cookie = dmaengine_submit(tx);

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		dma_async_issue_pending(chan);
}

static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

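/*
 * Strip the busy and opcode bits so that all of the "short response"
 * formats (R1, R1b, R6, R7) collapse onto the same controller setting.
 */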
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

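	/* The 32-bit argument is split across two 16-bit registers. */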
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}

static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}

static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
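	/*
	 * Each MMC_RES read yields 16 bits of the response stream.  Stitch
	 * them into 32-bit resp[] words: one byte carried over from the
	 * previous read, two bytes from w1, and the high byte of w2, whose
	 * low byte is carried into the next word.
	 */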
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/*
		 * workaround for erratum #91, if doing write
		 * enable DMA late
		 */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			dma_async_issue_pending(host->dma_chan_tx);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	struct dma_chan *chan;

	if (!data)
		return 0;

	if (data->flags & MMC_DATA_READ)
		chan = host->dma_chan_rx;
	else
		chan = host->dma_chan_tx;
	dma_unmap_sg(chan->device->dev,
		     data->sg, data->sg_len, host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
		if (ireg & SDIO_INT) {
			mmc_signal_sdio_irq(host->mmc);
			handled = 1;
		}
	}

	return IRQ_RETVAL(handled);
}

static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

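	/*
	 * Take a snapshot of cmdat: CMDAT_INIT is a one-shot flag set at
	 * power-on, applied to this command only and then cleared.
	 */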
	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}

static int pxamci_get_ro(struct mmc_host *mmc)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (host->use_ro_gpio)
		return mmc_gpio_get_ro(mmc);
	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		if (host->clkrt == CLKRT_OFF)
			clk_prepare_enable(host->clk);

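		/*
		 * CLKRT effectively selects a power-of-two divider of the
		 * base clock; the dedicated setting 7 is used for 26MHz
		 * operation on parts that support it.
		 */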
		if (ios->clock == 26000000) {
			/* to support 26MHz */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire.  check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
			host->clkrt = fls(clk) - 1;
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable_unprepare(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		int ret;

		host->power_mode = ios->power_mode;

		ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
		if (ret) {
			dev_err(mmc_dev(mmc), "unable to set power\n");
			/*
			 * The .set_ios() function in the mmc_host_ops
			 * struct returns void, and failing to set the
			 * power should be rare, so we print an error and
			 * return here.
			 */
			return;
		}

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
		host->clkrt, host->cmdat);
}

static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
	struct pxamci_host *pxa_host = mmc_priv(host);

	if (enable)
		pxamci_enable_irq(pxa_host, SDIO_INT);
	else
		pxamci_disable_irq(pxa_host, SDIO_INT);
}

static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_cd			= mmc_gpio_get_cd,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};

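/*
 * dmaengine completion callback, installed for writes only (see
 * pxamci_setup_data()): on success, signal that the partially-filled
 * transmit buffer should be pushed out; on a DMA error, fail the whole
 * data transfer.
 */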
static void pxamci_dma_irq(void *param)
{
	struct pxamci_host *host = param;
	struct dma_tx_state state;
	enum dma_status status;
	struct dma_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (!host->data)
		goto out_unlock;

	if (host->data->flags & MMC_DATA_READ)
		chan = host->dma_chan_rx;
	else
		chan = host->dma_chan_tx;

	status = dmaengine_tx_status(chan, host->dma_cookie, &state);

	if (likely(status == DMA_COMPLETE)) {
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
			host->data->flags & MMC_DATA_READ ? "rx" : "tx");
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
}

static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, msecs_to_jiffies(host->detect_delay_ms));
	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
static const struct of_device_id pxa_mmc_dt_ids[] = {
	{ .compatible = "marvell,pxa-mmc" },
	{ }
};

MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);

static int pxamci_of_init(struct platform_device *pdev,
			  struct mmc_host *mmc)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxamci_host *host = mmc_priv(mmc);
	u32 tmp;
	int ret;

	if (!np)
		return 0;

	/* pxa-mmc specific */
	if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
		host->detect_delay_ms = tmp;

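	/* Generic MMC properties (bus-width, cd-gpios, ...) are parsed by the core. */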
	ret = mmc_of_parse(mmc);
	if (ret < 0)
		return ret;

	return 0;
}
#else
static int pxamci_of_init(struct platform_device *pdev,
			  struct mmc_host *mmc)
{
	return 0;
}
#endif

static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct device *dev = &pdev->dev;
	struct resource *r;
	int ret, irq;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is only 10 bits before PXA27x.
	 */
	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	ret = pxamci_of_init(pdev, mmc);
	if (ret)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdata = pdev->dev.platform_data;
	host->clkrt = CLKRT_OFF;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto out;
	}

	host->clkrate = clk_get_rate(host->clk);

	/*
	 * Calculate minimum clock rate, rounding up.
	 */
	mmc->f_min = (host->clkrate + 63) / 64;
	mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;

	ret = pxamci_init_ocr(host);
	if (ret < 0)
		goto out;

	mmc->caps = 0;
	host->cmdat = 0;
	if (!cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
		if (mmc_has_26MHz())
			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
				     MMC_CAP_SD_HIGHSPEED;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->imask = MMC_I_MASK_ALL;

	host->base = devm_ioremap_resource(dev, r);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	ret = devm_request_irq(dev, irq, pxamci_irq, 0,
			       DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

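	/*
	 * The "rx" and "tx" channels are typically described either by the
	 * device tree "dmas"/"dma-names" properties or by a platform DMA
	 * slave map on legacy board files.
	 */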
	host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
	if (host->dma_chan_rx == NULL) {
		dev_err(dev, "unable to request rx dma channel\n");
		ret = -ENODEV;
		goto out;
	}

	host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
	if (host->dma_chan_tx == NULL) {
		dev_err(dev, "unable to request tx dma channel\n");
		ret = -ENODEV;
		goto out;
	}

	if (host->pdata) {
		host->detect_delay_ms = host->pdata->detect_delay_ms;

		host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
		if (IS_ERR(host->power)) {
			ret = PTR_ERR(host->power);
			dev_err(dev, "Failed requesting gpio_power\n");
			goto out;
		}

		/* FIXME: should we pass detection delay to debounce? */
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret && ret != -ENOENT) {
			dev_err(dev, "Failed requesting gpio_cd\n");
			goto out;
		}

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
		if (ret && ret != -ENOENT) {
			dev_err(dev, "Failed requesting gpio_ro\n");
			goto out;
		}
		if (!ret) {
			host->use_ro_gpio = true;
			mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
				0 : MMC_CAP2_RO_ACTIVE_HIGH;
		}

		if (host->pdata->init)
			host->pdata->init(dev, pxamci_detect_irq, mmc);

		if (host->power && host->pdata->setpower)
			dev_warn(dev, "gpio_power and setpower() both defined\n");
		if (host->use_ro_gpio && host->pdata->get_ro)
			dev_warn(dev, "gpio_ro and get_ro() both defined\n");
	}

	mmc_add_host(mmc);

	return 0;

out:
	if (host) {
		if (host->dma_chan_rx)
			dma_release_channel(host->dma_chan_rx);
		if (host->dma_chan_tx)
			dma_release_channel(host->dma_chan_tx);
	}
	if (mmc)
		mmc_free_host(mmc);
	return ret;
}

static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		dmaengine_terminate_all(host->dma_chan_rx);
		dmaengine_terminate_all(host->dma_chan_tx);
		dma_release_channel(host->dma_chan_rx);
		dma_release_channel(host->dma_chan_tx);

		mmc_free_host(mmc);
	}

	return 0;
}

static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
	},
};

module_platform_driver(pxamci_driver);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");