xref: /openbmc/linux/drivers/mmc/host/pxamci.c (revision 1c2dd16a)
/*
 *  linux/drivers/mmc/host/pxamci.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *  Yuck!
 *
 *	1 and 3 byte data transfers not supported
 *	max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>

#include <asm/sizes.h>

#include <mach/hardware.h>
#include <linux/platform_data/mmc-pxamci.h>

#include "pxamci.h"

#define DRIVER_NAME	"pxa2xx-mci"

#define NR_SG	1
#define CLKRT_OFF	(~0)

#define mmc_has_26MHz()		(cpu_is_pxa300() || cpu_is_pxa310() \
				|| cpu_is_pxa935())

struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;
	struct resource		*res;
	void __iomem		*base;
	struct clk		*clk;
	unsigned long		clkrate;
	int			irq;
	unsigned int		clkrt;
	unsigned int		cmdat;
	unsigned int		imask;
	unsigned int		power_mode;
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*dma_chan_tx;
	dma_cookie_t		dma_cookie;
	dma_addr_t		sg_dma;
	unsigned int		dma_len;

	unsigned int		dma_dir;
	unsigned int		dma_drcmrrx;
	unsigned int		dma_drcmrtx;

	struct regulator	*vcc;
};

static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
	host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");

	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
		if (host->pdata && host->pdata->ocr_mask)
			dev_warn(mmc_dev(host->mmc),
				"ocr_mask/setpower will not be used\n");
	}
#endif
	if (host->vcc == NULL) {
		/* fall-back to platform data */
		host->mmc->ocr_avail = host->pdata ?
			host->pdata->ocr_mask :
			MMC_VDD_32_33 | MMC_VDD_33_34;
	}
}

static inline int pxamci_set_power(struct pxamci_host *host,
				    unsigned char power_mode,
				    unsigned int vdd)
{
	int on;

	if (host->vcc) {
		int ret;

		if (power_mode == MMC_POWER_UP) {
			ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
			if (ret)
				return ret;
		} else if (power_mode == MMC_POWER_OFF) {
			ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
			if (ret)
				return ret;
		}
	}
	if (!host->vcc && host->pdata &&
	    gpio_is_valid(host->pdata->gpio_power)) {
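		/*
		 * Drive the board power GPIO directly: 'on' is non-zero when
		 * the requested vdd bit falls inside the supported OCR mask,
		 * and gpio_power_invert flips the polarity for boards whose
		 * power line is active-low.
		 */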
		on = ((1 << vdd) & host->pdata->ocr_mask);
		gpio_set_value(host->pdata->gpio_power,
			       !!on ^ host->pdata->gpio_power_invert);
	}
	if (!host->vcc && host->pdata && host->pdata->setpower)
		return host->pdata->setpower(mmc_dev(host->mmc), vdd);

	return 0;
}

static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}

static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_dma_irq(void *param);

static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_data_direction direction;
	struct dma_slave_config	config;
	struct dma_chan *chan;
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	int ret;

	host->data = data;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

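	/*
	 * Convert the card's data timeout into controller clock cycles:
	 * timeout_ns is scaled by the clock rate, and timeout_clks (given
	 * in card clock cycles) is scaled up by the current CLKRT divider.
	 * The sum is then rounded up into the 256-cycle units that the
	 * (timeout + 255) / 256 write to MMC_RDTO implies.
	 */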
	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

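	/*
	 * Fill in both directions of the slave config up front; the
	 * dmaengine_slave_config() call below only uses the half that
	 * matches 'direction'.  The data FIFOs are accessed byte-wide
	 * in bursts of 32.
	 */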
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = host->res->start + MMC_RXFIFO;
	config.dst_addr = host->res->start + MMC_TXFIFO;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		chan = host->dma_chan_rx;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		chan = host->dma_chan_tx;
	}

	config.direction = direction;

	ret = dmaengine_slave_config(chan, &config);
	if (ret < 0) {
		dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
		return;
	}

	host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
				   host->dma_dir);

	tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		return;
	}

	if (!(data->flags & MMC_DATA_READ)) {
		tx->callback = pxamci_dma_irq;
		tx->callback_param = host;
	}

	host->dma_cookie = dmaengine_submit(tx);

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		dma_async_issue_pending(chan);
}

static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

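	/*
	 * RSP_TYPE() strips the busy and opcode-check flags so that the
	 * remaining bits select one of the three response formats the
	 * controller understands: short, long (R2) and no-CRC (R3).
	 */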
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

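	/*
	 * The 32-bit argument is split across the 16-bit ARGH/ARGL
	 * registers; CLKRT carries the divider chosen in pxamci_set_ios().
	 */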
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}

static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}

static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick.  We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
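	/*
	 * The response arrives through a 16-bit FIFO: each 32-bit word of
	 * cmd->resp[] is stitched together from the low byte of the
	 * previous read plus the next two reads, i.e.
	 *   resp[i] = prev[7:0] << 24 | w1[15:0] << 8 | w2[15:8]
	 */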
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/*
		 * workaround for erratum #91, if doing write
		 * enable DMA late
		 */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			dma_async_issue_pending(host->dma_chan_tx);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	struct dma_chan *chan;

	if (!data)
		return 0;

	if (data->flags & MMC_DATA_READ)
		chan = host->dma_chan_rx;
	else
		chan = host->dma_chan_tx;
	dma_unmap_sg(chan->device->dev,
		     data->sg, data->sg_len, host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here.  There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}

static irqreturn_t pxamci_irq(int irq, void *devid)
{
	struct pxamci_host *host = devid;
	unsigned int ireg;
	int handled = 0;

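	/* Only consider interrupt sources that are not currently masked. */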
	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

	if (ireg) {
		unsigned stat = readl(host->base + MMC_STAT);

		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

		if (ireg & END_CMD_RES)
			handled |= pxamci_cmd_done(host, stat);
		if (ireg & DATA_TRAN_DONE)
			handled |= pxamci_data_done(host, stat);
		if (ireg & SDIO_INT) {
			mmc_signal_sdio_irq(host->mmc);
			handled = 1;
		}
	}

	return IRQ_RETVAL(handled);
}

static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct pxamci_host *host = mmc_priv(mmc);
	unsigned int cmdat;

	WARN_ON(host->mrq != NULL);

	host->mrq = mrq;

	pxamci_stop_clock(host);

	cmdat = host->cmdat;
	host->cmdat &= ~CMDAT_INIT;

	if (mrq->data) {
		pxamci_setup_data(host, mrq->data);

		cmdat &= ~CMDAT_BUSY;
		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
		if (mrq->data->flags & MMC_DATA_WRITE)
			cmdat |= CMDAT_WRITE;
	}

	pxamci_start_cmd(host, mrq->cmd, cmdat);
}

static int pxamci_get_ro(struct mmc_host *mmc)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
		return mmc_gpio_get_ro(mmc);
	if (host->pdata && host->pdata->get_ro)
		return !!host->pdata->get_ro(mmc_dev(mmc));
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		if (host->clkrt == CLKRT_OFF)
			clk_prepare_enable(host->clk);

		if (ios->clock == 26000000) {
			/* to support 26MHz */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire.  check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
			host->clkrt = fls(clk) - 1;
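			/*
			 * CLKRT selects a power-of-two divider, so the card
			 * clock ends up at roughly clkrate / (1 << clkrt).
			 * Worked example (hypothetical rates): clkrate of
			 * 19.5MHz and ios->clock of 5MHz give clk = 3, which
			 * is doubled to 6 because 19.5MHz / 3 > 5MHz, so
			 * clkrt = 2 and the card runs at about 4.875MHz.
			 */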
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable_unprepare(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		int ret;

		host->power_mode = ios->power_mode;

		ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
		if (ret) {
			dev_err(mmc_dev(mmc), "unable to set power\n");
			/*
			 * The .set_ios() callback in the mmc_host_ops
			 * struct returns void, and failing to set the
			 * power should be rare, so we print an error and
			 * return here.
			 */
			return;
		}

		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
		host->clkrt, host->cmdat);
}

static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
	struct pxamci_host *pxa_host = mmc_priv(host);

	if (enable)
		pxamci_enable_irq(pxa_host, SDIO_INT);
	else
		pxamci_disable_irq(pxa_host, SDIO_INT);
}

static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_cd			= mmc_gpio_get_cd,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};

static void pxamci_dma_irq(void *param)
{
	struct pxamci_host *host = param;
	struct dma_tx_state state;
	enum dma_status status;
	struct dma_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (!host->data)
		goto out_unlock;

	if (host->data->flags & MMC_DATA_READ)
		chan = host->dma_chan_rx;
	else
		chan = host->dma_chan_tx;

	status = dmaengine_tx_status(chan, host->dma_cookie, &state);

	if (likely(status == DMA_COMPLETE)) {
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
			host->data->flags & MMC_DATA_READ ? "rx" : "tx");
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
}

static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
	struct pxamci_host *host = mmc_priv(devid);

	mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
static const struct of_device_id pxa_mmc_dt_ids[] = {
	{ .compatible = "marvell,pxa-mmc" },
	{ }
};

MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);

static int pxamci_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxamci_platform_data *pdata;
	u32 tmp;

	if (!np)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->gpio_card_detect =
		of_get_named_gpio(np, "cd-gpios", 0);
	pdata->gpio_card_ro =
		of_get_named_gpio(np, "wp-gpios", 0);

	/* pxa-mmc specific */
	pdata->gpio_power =
		of_get_named_gpio(np, "pxa-mmc,gpio-power", 0);

	if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
		pdata->detect_delay_ms = tmp;

	pdev->dev.platform_data = pdata;

	return 0;
}
#else
static int pxamci_of_init(struct platform_device *pdev)
{
	return 0;
}
#endif

static int pxamci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct pxamci_host *host = NULL;
	struct resource *r, *dmarx, *dmatx;
	struct pxad_param param_rx, param_tx;
	int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
	dma_cap_mask_t mask;

	ret = pxamci_of_init(pdev);
	if (ret)
		return ret;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	mmc->ops = &pxamci_ops;

	/*
	 * We can do SG-DMA, but we don't because we never know how much
	 * data we successfully wrote to the card.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Our hardware DMA can handle a maximum of one page per SG entry.
	 */
	mmc->max_seg_size = PAGE_SIZE;

	/*
	 * Block length register is only 10 bits before PXA27x.
	 */
	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdata = pdev->dev.platform_data;
	host->clkrt = CLKRT_OFF;

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto out;
	}

	host->clkrate = clk_get_rate(host->clk);

	/*
	 * Calculate the minimum clock rate, rounding up: the largest
	 * clock divider the controller offers is 64.
	 */
	mmc->f_min = (host->clkrate + 63) / 64;
	mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;

	pxamci_init_ocr(host);

	mmc->caps = 0;
	host->cmdat = 0;
	if (!cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
		if (mmc_has_26MHz())
			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
				     MMC_CAP_SD_HIGHSPEED;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;

	host->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and set it up
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
			       DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

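	/*
	 * On legacy (non-DT) platforms the DRCMR request lines for the RX
	 * and TX channels come from the platform DMA resources and are
	 * passed to the pxa-dma driver through the filter parameters.
	 */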
	if (!pdev->dev.of_node) {
		dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (!dmarx || !dmatx) {
			ret = -ENXIO;
			goto out;
		}
		param_rx.prio = PXAD_PRIO_LOWEST;
		param_rx.drcmr = dmarx->start;
		param_tx.prio = PXAD_PRIO_LOWEST;
		param_tx.drcmr = dmatx->start;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_chan_rx =
		dma_request_slave_channel_compat(mask, pxad_filter_fn,
						 &param_rx, &pdev->dev, "rx");
	if (host->dma_chan_rx == NULL) {
		dev_err(&pdev->dev, "unable to request rx dma channel\n");
		ret = -ENODEV;
		goto out;
	}

	host->dma_chan_tx =
		dma_request_slave_channel_compat(mask, pxad_filter_fn,
						 &param_tx,  &pdev->dev, "tx");
	if (host->dma_chan_tx == NULL) {
		dev_err(&pdev->dev, "unable to request tx dma channel\n");
		ret = -ENODEV;
		goto out;
	}

	if (host->pdata) {
		gpio_cd = host->pdata->gpio_card_detect;
		gpio_ro = host->pdata->gpio_card_ro;
		gpio_power = host->pdata->gpio_power;
	}
	if (gpio_is_valid(gpio_power)) {
		ret = devm_gpio_request(&pdev->dev, gpio_power,
					"mmc card power");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
				gpio_power);
			goto out;
		}
		gpio_direction_output(gpio_power,
				      host->pdata->gpio_power_invert);
	}
	if (gpio_is_valid(gpio_ro)) {
		ret = mmc_gpio_request_ro(mmc, gpio_ro);
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
				gpio_ro);
			goto out;
		} else {
			mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
				0 : MMC_CAP2_RO_ACTIVE_HIGH;
		}
	}

	if (gpio_is_valid(gpio_cd))
		ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
		goto out;
	}

	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	if (gpio_is_valid(gpio_power) && host->pdata->setpower)
		dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
	if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
		dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");

	mmc_add_host(mmc);

	return 0;

out:
	if (host) {
		if (host->dma_chan_rx)
			dma_release_channel(host->dma_chan_rx);
		if (host->dma_chan_tx)
			dma_release_channel(host->dma_chan_tx);
	}
	if (mmc)
		mmc_free_host(mmc);
	return ret;
}

static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		if (host->pdata) {
			gpio_cd = host->pdata->gpio_card_detect;
			gpio_ro = host->pdata->gpio_card_ro;
			gpio_power = host->pdata->gpio_power;
		}
		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		dmaengine_terminate_all(host->dma_chan_rx);
		dmaengine_terminate_all(host->dma_chan_tx);
		dma_release_channel(host->dma_chan_rx);
		dma_release_channel(host->dma_chan_tx);

		mmc_free_host(mmc);
	}
	return 0;
}

static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
	},
};

module_platform_driver(pxamci_driver);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");