xref: /openbmc/linux/drivers/mmc/host/pxamci.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *  linux/drivers/mmc/host/pxa.c - PXA MMCI driver
4   *
5   *  Copyright (C) 2003 Russell King, All Rights Reserved.
6   *
7   *  This hardware is really sick:
8   *   - No way to clear interrupts.
9   *   - Have to turn off the clock whenever we touch the device.
10   *   - Doesn't tell you how many data blocks were transferred.
11   *  Yuck!
12   *
13   *	1 and 3 byte data transfers not supported
14   *	max block length up to 1023
15   */
16  #include <linux/module.h>
17  #include <linux/init.h>
18  #include <linux/ioport.h>
19  #include <linux/platform_device.h>
20  #include <linux/delay.h>
21  #include <linux/interrupt.h>
22  #include <linux/dmaengine.h>
23  #include <linux/dma-mapping.h>
24  #include <linux/clk.h>
25  #include <linux/err.h>
26  #include <linux/mmc/host.h>
27  #include <linux/mmc/slot-gpio.h>
28  #include <linux/io.h>
29  #include <linux/regulator/consumer.h>
30  #include <linux/gpio/consumer.h>
31  #include <linux/gfp.h>
32  #include <linux/of.h>
33  #include <linux/soc/pxa/cpu.h>
34  
35  #include <linux/sizes.h>
36  
37  #include <linux/platform_data/mmc-pxamci.h>
38  
39  #include "pxamci.h"
40  
41  #define DRIVER_NAME	"pxa2xx-mci"
42  
43  #define NR_SG	1
44  #define CLKRT_OFF	(~0)
45  
46  #define mmc_has_26MHz()		(cpu_is_pxa300() || cpu_is_pxa310() \
47  				|| cpu_is_pxa935())
48  
49  struct pxamci_host {
50  	struct mmc_host		*mmc;
51  	spinlock_t		lock;
52  	struct resource		*res;
53  	void __iomem		*base;
54  	struct clk		*clk;
55  	unsigned long		clkrate;
56  	unsigned int		clkrt;
57  	unsigned int		cmdat;
58  	unsigned int		imask;
59  	unsigned int		power_mode;
60  	unsigned long		detect_delay_ms;
61  	bool			use_ro_gpio;
62  	struct gpio_desc	*power;
63  	struct pxamci_platform_data *pdata;
64  
65  	struct mmc_request	*mrq;
66  	struct mmc_command	*cmd;
67  	struct mmc_data		*data;
68  
69  	struct dma_chan		*dma_chan_rx;
70  	struct dma_chan		*dma_chan_tx;
71  	dma_cookie_t		dma_cookie;
72  	unsigned int		dma_len;
73  	unsigned int		dma_dir;
74  };
75  
76  static int pxamci_init_ocr(struct pxamci_host *host)
77  {
78  	struct mmc_host *mmc = host->mmc;
79  	int ret;
80  
81  	ret = mmc_regulator_get_supply(mmc);
82  	if (ret < 0)
83  		return ret;
84  
85  	if (IS_ERR(mmc->supply.vmmc)) {
86  		/* fall-back to platform data */
87  		mmc->ocr_avail = host->pdata ?
88  			host->pdata->ocr_mask :
89  			MMC_VDD_32_33 | MMC_VDD_33_34;
90  	}
91  
92  	return 0;
93  }
94  
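/*
 * Power is controlled through a vmmc regulator when one is available,
 * otherwise through the optional power GPIO and/or the board's
 * setpower() callback from platform data.
 */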
95  static inline int pxamci_set_power(struct pxamci_host *host,
96  				    unsigned char power_mode,
97  				    unsigned int vdd)
98  {
99  	struct mmc_host *mmc = host->mmc;
100  	struct regulator *supply = mmc->supply.vmmc;
101  
102  	if (!IS_ERR(supply))
103  		return mmc_regulator_set_ocr(mmc, supply, vdd);
104  
105  	if (host->power) {
106  		bool on = !!((1 << vdd) & host->pdata->ocr_mask);
107  		gpiod_set_value(host->power, on);
108  	}
109  
110  	if (host->pdata && host->pdata->setpower)
111  		return host->pdata->setpower(mmc_dev(host->mmc), vdd);
112  
113  	return 0;
114  }
115  
116  static void pxamci_stop_clock(struct pxamci_host *host)
117  {
118  	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
119  		unsigned long timeout = 10000;
120  		unsigned int v;
121  
122  		writel(STOP_CLOCK, host->base + MMC_STRPCL);
123  
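		/* poll for up to ~10 ms for the controller to report the clock stopped */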
124  		do {
125  			v = readl(host->base + MMC_STAT);
126  			if (!(v & STAT_CLK_EN))
127  				break;
128  			udelay(1);
129  		} while (timeout--);
130  
131  		if (v & STAT_CLK_EN)
132  			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
133  	}
134  }
135  
136  static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
137  {
138  	unsigned long flags;
139  
140  	spin_lock_irqsave(&host->lock, flags);
141  	host->imask &= ~mask;
142  	writel(host->imask, host->base + MMC_I_MASK);
143  	spin_unlock_irqrestore(&host->lock, flags);
144  }
145  
146  static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
147  {
148  	unsigned long flags;
149  
150  	spin_lock_irqsave(&host->lock, flags);
151  	host->imask |= mask;
152  	writel(host->imask, host->base + MMC_I_MASK);
153  	spin_unlock_irqrestore(&host->lock, flags);
154  }
155  
156  static void pxamci_dma_irq(void *param);
157  
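/*
 * Program the block count, block length and read timeout, then map the
 * scatterlist and submit a slave DMA descriptor for the data phase.
 */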
158  static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
159  {
160  	struct dma_async_tx_descriptor *tx;
161  	enum dma_transfer_direction direction;
162  	struct dma_slave_config	config;
163  	struct dma_chan *chan;
164  	unsigned int nob = data->blocks;
165  	unsigned long long clks;
166  	unsigned int timeout;
167  	int ret;
168  
169  	host->data = data;
170  
171  	writel(nob, host->base + MMC_NOB);
172  	writel(data->blksz, host->base + MMC_BLKLEN);
173  
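	/*
	 * Convert the data timeout to MMC clock cycles; MMC_RDTO is
	 * programmed in units of 256 clocks, rounded up.
	 */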
174  	clks = (unsigned long long)data->timeout_ns * host->clkrate;
175  	do_div(clks, 1000000000UL);
176  	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
177  	writel((timeout + 255) / 256, host->base + MMC_RDTO);
178  
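	/* both directions use byte-wide FIFO accesses in bursts of 32 */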
179  	memset(&config, 0, sizeof(config));
180  	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
181  	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
182  	config.src_addr = host->res->start + MMC_RXFIFO;
183  	config.dst_addr = host->res->start + MMC_TXFIFO;
184  	config.src_maxburst = 32;
185  	config.dst_maxburst = 32;
186  
187  	if (data->flags & MMC_DATA_READ) {
188  		host->dma_dir = DMA_FROM_DEVICE;
189  		direction = DMA_DEV_TO_MEM;
190  		chan = host->dma_chan_rx;
191  	} else {
192  		host->dma_dir = DMA_TO_DEVICE;
193  		direction = DMA_MEM_TO_DEV;
194  		chan = host->dma_chan_tx;
195  	}
196  
197  	config.direction = direction;
198  
199  	ret = dmaengine_slave_config(chan, &config);
200  	if (ret < 0) {
201  		dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
202  		return;
203  	}
204  
205  	host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
206  				   host->dma_dir);
207  
208  	tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
209  				     DMA_PREP_INTERRUPT);
210  	if (!tx) {
211  		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
212  		return;
213  	}
214  
215  	if (!(data->flags & MMC_DATA_READ)) {
216  		tx->callback = pxamci_dma_irq;
217  		tx->callback_param = host;
218  	}
219  
220  	host->dma_cookie = dmaengine_submit(tx);
221  
222  	/*
223  	 * workaround for erratum #91:
224  	 * only start DMA now if we are doing a read,
225  	 * otherwise we wait until CMD/RESP has finished
226  	 * before starting DMA.
227  	 */
228  	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
229  		dma_async_issue_pending(chan);
230  }
231  
232  static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
233  {
234  	WARN_ON(host->cmd != NULL);
235  	host->cmd = cmd;
236  
237  	if (cmd->flags & MMC_RSP_BUSY)
238  		cmdat |= CMDAT_BUSY;
239  
240  #define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
241  	switch (RSP_TYPE(mmc_resp_type(cmd))) {
242  	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
243  		cmdat |= CMDAT_RESP_SHORT;
244  		break;
245  	case RSP_TYPE(MMC_RSP_R3):
246  		cmdat |= CMDAT_RESP_R3;
247  		break;
248  	case RSP_TYPE(MMC_RSP_R2):
249  		cmdat |= CMDAT_RESP_R2;
250  		break;
251  	default:
252  		break;
253  	}
254  
255  	writel(cmd->opcode, host->base + MMC_CMD);
256  	writel(cmd->arg >> 16, host->base + MMC_ARGH);
257  	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
258  	writel(cmdat, host->base + MMC_CMDAT);
259  	writel(host->clkrt, host->base + MMC_CLKRT);
260  
261  	writel(START_CLOCK, host->base + MMC_STRPCL);
262  
263  	pxamci_enable_irq(host, END_CMD_RES);
264  }
265  
266  static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
267  {
268  	host->mrq = NULL;
269  	host->cmd = NULL;
270  	host->data = NULL;
271  	mmc_request_done(host->mmc, mrq);
272  }
273  
274  static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
275  {
276  	struct mmc_command *cmd = host->cmd;
277  	int i;
278  	u32 v;
279  
280  	if (!cmd)
281  		return 0;
282  
283  	host->cmd = NULL;
284  
285  	/*
286  	 * Did I mention this is Sick.  We always need to
287  	 * discard the upper 8 bits of the first 16-bit word.
288  	 */
289  	v = readl(host->base + MMC_RES) & 0xffff;
290  	for (i = 0; i < 4; i++) {
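		/*
		 * Each MMC_RES read returns 16 bits: build each 32-bit response
		 * word from the low byte of the previous read, a full 16-bit
		 * read and the high byte of the following read.
		 */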
291  		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
292  		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
293  		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
294  		v = w2;
295  	}
296  
297  	if (stat & STAT_TIME_OUT_RESPONSE) {
298  		cmd->error = -ETIMEDOUT;
299  	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
300  		/*
301  		 * workaround for erratum #42:
302  		 * Intel PXA27x Family Processor Specification Update Rev 001
303  		 * A bogus CRC error can appear if the msb of a 136 bit
304  		 * response is a one.
305  		 */
306  		if (cpu_is_pxa27x() &&
307  		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
308  			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
309  		else
310  			cmd->error = -EILSEQ;
311  	}
312  
313  	pxamci_disable_irq(host, END_CMD_RES);
314  	if (host->data && !cmd->error) {
315  		pxamci_enable_irq(host, DATA_TRAN_DONE);
316  		/*
317  		 * workaround for erratum #91, if doing write
318  		 * enable DMA late
319  		 */
320  		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
321  			dma_async_issue_pending(host->dma_chan_tx);
322  	} else {
323  		pxamci_finish_request(host, host->mrq);
324  	}
325  
326  	return 1;
327  }
328  
329  static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
330  {
331  	struct mmc_data *data = host->data;
332  	struct dma_chan *chan;
333  
334  	if (!data)
335  		return 0;
336  
337  	if (data->flags & MMC_DATA_READ)
338  		chan = host->dma_chan_rx;
339  	else
340  		chan = host->dma_chan_tx;
341  	dma_unmap_sg(chan->device->dev,
342  		     data->sg, data->sg_len, host->dma_dir);
343  
344  	if (stat & STAT_READ_TIME_OUT)
345  		data->error = -ETIMEDOUT;
346  	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
347  		data->error = -EILSEQ;
348  
349  	/*
350  	 * There appears to be a hardware design bug here.  There seems to
351  	 * be no way to find out how much data was transferred to the card.
352  	 * This means that if there was an error on any block, we mark all
353  	 * data blocks as being in error.
354  	 */
355  	if (!data->error)
356  		data->bytes_xfered = data->blocks * data->blksz;
357  	else
358  		data->bytes_xfered = 0;
359  
360  	pxamci_disable_irq(host, DATA_TRAN_DONE);
361  
362  	host->data = NULL;
363  	if (host->mrq->stop) {
364  		pxamci_stop_clock(host);
365  		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
366  	} else {
367  		pxamci_finish_request(host, host->mrq);
368  	}
369  
370  	return 1;
371  }
372  
373  static irqreturn_t pxamci_irq(int irq, void *devid)
374  {
375  	struct pxamci_host *host = devid;
376  	unsigned int ireg;
377  	int handled = 0;
378  
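	/* only act on interrupt sources that are currently unmasked */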
379  	ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
380  
381  	if (ireg) {
382  		unsigned stat = readl(host->base + MMC_STAT);
383  
384  		pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
385  
386  		if (ireg & END_CMD_RES)
387  			handled |= pxamci_cmd_done(host, stat);
388  		if (ireg & DATA_TRAN_DONE)
389  			handled |= pxamci_data_done(host, stat);
390  		if (ireg & SDIO_INT) {
391  			mmc_signal_sdio_irq(host->mmc);
392  			handled = 1;
393  		}
394  	}
395  
396  	return IRQ_RETVAL(handled);
397  }
398  
399  static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
400  {
401  	struct pxamci_host *host = mmc_priv(mmc);
402  	unsigned int cmdat;
403  
404  	WARN_ON(host->mrq != NULL);
405  
406  	host->mrq = mrq;
407  
408  	pxamci_stop_clock(host);
409  
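	/*
	 * CMDAT_INIT is set at power-on in pxamci_set_ios(); clear it here so
	 * it is only applied to the first command issued afterwards.
	 */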
410  	cmdat = host->cmdat;
411  	host->cmdat &= ~CMDAT_INIT;
412  
413  	if (mrq->data) {
414  		pxamci_setup_data(host, mrq->data);
415  
416  		cmdat &= ~CMDAT_BUSY;
417  		cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
418  		if (mrq->data->flags & MMC_DATA_WRITE)
419  			cmdat |= CMDAT_WRITE;
420  	}
421  
422  	pxamci_start_cmd(host, mrq->cmd, cmdat);
423  }
424  
425  static int pxamci_get_ro(struct mmc_host *mmc)
426  {
427  	struct pxamci_host *host = mmc_priv(mmc);
428  
429  	if (host->use_ro_gpio)
430  		return mmc_gpio_get_ro(mmc);
431  	if (host->pdata && host->pdata->get_ro)
432  		return !!host->pdata->get_ro(mmc_dev(mmc));
433  	/*
434  	 * Board doesn't support read only detection; let the mmc core
435  	 * decide what to do.
436  	 */
437  	return -ENOSYS;
438  }
439  
440  static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
441  {
442  	struct pxamci_host *host = mmc_priv(mmc);
443  
444  	if (ios->clock) {
445  		unsigned long rate = host->clkrate;
446  		unsigned int clk = rate / ios->clock;
447  
448  		if (host->clkrt == CLKRT_OFF)
449  			clk_prepare_enable(host->clk);
450  
451  		if (ios->clock == 26000000) {
452  			/* to support 26MHz */
453  			host->clkrt = 7;
454  		} else {
455  			/* to handle (19.5MHz, 26MHz) */
456  			if (!clk)
457  				clk = 1;
458  
459  			/*
460  			 * clk might result in a lower divisor than we
461  			 * desire.  check for that condition and adjust
462  			 * as appropriate.
463  			 */
464  			if (rate / clk > ios->clock)
465  				clk <<= 1;
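			/* MMC_CLKRT takes the exponent of a power-of-two divider */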
466  			host->clkrt = fls(clk) - 1;
467  		}
468  
469  		/*
470  		 * we write clkrt on the next command
471  		 */
472  	} else {
473  		pxamci_stop_clock(host);
474  		if (host->clkrt != CLKRT_OFF) {
475  			host->clkrt = CLKRT_OFF;
476  			clk_disable_unprepare(host->clk);
477  		}
478  	}
479  
480  	if (host->power_mode != ios->power_mode) {
481  		int ret;
482  
483  		host->power_mode = ios->power_mode;
484  
485  		ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
486  		if (ret) {
487  			dev_err(mmc_dev(mmc), "unable to set power\n");
488  			/*
489  			 * The .set_ios() function in the mmc_host_ops
490  			 * struct return void, and failing to set the
491  			 * power should be rare so we print an error and
492  			 * return here.
493  			 */
494  			return;
495  		}
496  
497  		if (ios->power_mode == MMC_POWER_ON)
498  			host->cmdat |= CMDAT_INIT;
499  	}
500  
501  	if (ios->bus_width == MMC_BUS_WIDTH_4)
502  		host->cmdat |= CMDAT_SD_4DAT;
503  	else
504  		host->cmdat &= ~CMDAT_SD_4DAT;
505  
506  	dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
507  		host->clkrt, host->cmdat);
508  }
509  
510  static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
511  {
512  	struct pxamci_host *pxa_host = mmc_priv(host);
513  
514  	if (enable)
515  		pxamci_enable_irq(pxa_host, SDIO_INT);
516  	else
517  		pxamci_disable_irq(pxa_host, SDIO_INT);
518  }
519  
520  static const struct mmc_host_ops pxamci_ops = {
521  	.request		= pxamci_request,
522  	.get_cd			= mmc_gpio_get_cd,
523  	.get_ro			= pxamci_get_ro,
524  	.set_ios		= pxamci_set_ios,
525  	.enable_sdio_irq	= pxamci_enable_sdio_irq,
526  };
527  
528  static void pxamci_dma_irq(void *param)
529  {
530  	struct pxamci_host *host = param;
531  	struct dma_tx_state state;
532  	enum dma_status status;
533  	struct dma_chan *chan;
534  	unsigned long flags;
535  
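	/*
	 * DMA completion callback; it is only installed for write transfers
	 * (see pxamci_setup_data()).
	 */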
536  	spin_lock_irqsave(&host->lock, flags);
537  
538  	if (!host->data)
539  		goto out_unlock;
540  
541  	if (host->data->flags & MMC_DATA_READ)
542  		chan = host->dma_chan_rx;
543  	else
544  		chan = host->dma_chan_tx;
545  
546  	status = dmaengine_tx_status(chan, host->dma_cookie, &state);
547  
548  	if (likely(status == DMA_COMPLETE)) {
549  		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
550  	} else {
551  		pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
552  			host->data->flags & MMC_DATA_READ ? "rx" : "tx");
553  		host->data->error = -EIO;
554  		pxamci_data_done(host, 0);
555  	}
556  
557  out_unlock:
558  	spin_unlock_irqrestore(&host->lock, flags);
559  }
560  
561  static irqreturn_t pxamci_detect_irq(int irq, void *devid)
562  {
563  	struct pxamci_host *host = mmc_priv(devid);
564  
565  	mmc_detect_change(devid, msecs_to_jiffies(host->detect_delay_ms));
566  	return IRQ_HANDLED;
567  }
568  
569  #ifdef CONFIG_OF
570  static const struct of_device_id pxa_mmc_dt_ids[] = {
571  	{ .compatible = "marvell,pxa-mmc" },
572  	{ }
573  };
574  
575  MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
576  
577  static int pxamci_of_init(struct platform_device *pdev,
578  			  struct mmc_host *mmc)
579  {
580  	struct device_node *np = pdev->dev.of_node;
581  	struct pxamci_host *host = mmc_priv(mmc);
582  	u32 tmp;
583  	int ret;
584  
585  	if (!np)
586  		return 0;
587  
588  	/* pxa-mmc specific */
589  	if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
590  		host->detect_delay_ms = tmp;
591  
592  	ret = mmc_of_parse(mmc);
593  	if (ret < 0)
594  		return ret;
595  
596  	return 0;
597  }
598  #else
599  static int pxamci_of_init(struct platform_device *pdev,
600  			  struct mmc_host *mmc)
601  {
602  	return 0;
603  }
604  #endif
605  
606  static int pxamci_probe(struct platform_device *pdev)
607  {
608  	struct mmc_host *mmc;
609  	struct pxamci_host *host = NULL;
610  	struct device *dev = &pdev->dev;
611  	struct resource *r;
612  	int ret, irq;
613  
614  	irq = platform_get_irq(pdev, 0);
615  	if (irq < 0)
616  		return irq;
617  
618  	mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
619  	if (!mmc) {
620  		ret = -ENOMEM;
621  		goto out;
622  	}
623  
624  	mmc->ops = &pxamci_ops;
625  
626  	/*
627  	 * We can do SG-DMA, but we don't because we never know how much
628  	 * data we successfully wrote to the card.
629  	 */
630  	mmc->max_segs = NR_SG;
631  
632  	/*
633  	 * Our hardware DMA can handle a maximum of one page per SG entry.
634  	 */
635  	mmc->max_seg_size = PAGE_SIZE;
636  
637  	/*
638  	 * Block length register is only 10 bits before PXA27x.
639  	 */
640  	mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
641  
642  	/*
643  	 * Block count register is 16 bits.
644  	 */
645  	mmc->max_blk_count = 65535;
646  
647  	ret = pxamci_of_init(pdev, mmc);
648  	if (ret)
649  		goto out;
650  
651  	host = mmc_priv(mmc);
652  	host->mmc = mmc;
653  	host->pdata = pdev->dev.platform_data;
654  	host->clkrt = CLKRT_OFF;
655  
656  	host->clk = devm_clk_get(dev, NULL);
657  	if (IS_ERR(host->clk)) {
658  		ret = PTR_ERR(host->clk);
659  		host->clk = NULL;
660  		goto out;
661  	}
662  
663  	host->clkrate = clk_get_rate(host->clk);
664  
665  	/*
666  	 * Calculate minimum clock rate, rounding up.
667  	 */
668  	mmc->f_min = (host->clkrate + 63) / 64;
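	/* 64 is the largest power-of-two divider selectable via MMC_CLKRT */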
669  	mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;
670  
671  	ret = pxamci_init_ocr(host);
672  	if (ret < 0)
673  		goto out;
674  
675  	mmc->caps = 0;
676  	host->cmdat = 0;
677  	if (!cpu_is_pxa25x()) {
678  		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
679  		host->cmdat |= CMDAT_SDIO_INT_EN;
680  		if (mmc_has_26MHz())
681  			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
682  				     MMC_CAP_SD_HIGHSPEED;
683  	}
684  
685  	spin_lock_init(&host->lock);
686  	host->imask = MMC_I_MASK_ALL;
687  
688  	host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
689  	if (IS_ERR(host->base)) {
690  		ret = PTR_ERR(host->base);
691  		goto out;
692  	}
693  	host->res = r;
694  
695  	/*
696  	 * Ensure that the host controller is shut down, and setup
697  	 * with our defaults.
698  	 */
699  	pxamci_stop_clock(host);
700  	writel(0, host->base + MMC_SPI);
701  	writel(64, host->base + MMC_RESTO);
702  	writel(host->imask, host->base + MMC_I_MASK);
703  
704  	ret = devm_request_irq(dev, irq, pxamci_irq, 0,
705  			       DRIVER_NAME, host);
706  	if (ret)
707  		goto out;
708  
709  	platform_set_drvdata(pdev, mmc);
710  
711  	host->dma_chan_rx = dma_request_chan(dev, "rx");
712  	if (IS_ERR(host->dma_chan_rx)) {
713  		dev_err(dev, "unable to request rx dma channel\n");
714  		ret = PTR_ERR(host->dma_chan_rx);
715  		host->dma_chan_rx = NULL;
716  		goto out;
717  	}
718  
719  	host->dma_chan_tx = dma_request_chan(dev, "tx");
720  	if (IS_ERR(host->dma_chan_tx)) {
721  		dev_err(dev, "unable to request tx dma channel\n");
722  		ret = PTR_ERR(host->dma_chan_tx);
723  		host->dma_chan_tx = NULL;
724  		goto out;
725  	}
726  
727  	if (host->pdata) {
728  		host->detect_delay_ms = host->pdata->detect_delay_ms;
729  
730  		host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
731  		if (IS_ERR(host->power)) {
732  			ret = PTR_ERR(host->power);
733  			dev_err(dev, "Failed requesting gpio_power\n");
734  			goto out;
735  		}
736  
737  		/* FIXME: should we pass detection delay to debounce? */
738  		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
739  		if (ret && ret != -ENOENT) {
740  			dev_err(dev, "Failed requesting gpio_cd\n");
741  			goto out;
742  		}
743  
744  		if (!host->pdata->gpio_card_ro_invert)
745  			mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
746  
747  		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
748  		if (ret && ret != -ENOENT) {
749  			dev_err(dev, "Failed requesting gpio_ro\n");
750  			goto out;
751  		}
752  		if (!ret)
753  			host->use_ro_gpio = true;
754  
755  		if (host->pdata->init)
756  			host->pdata->init(dev, pxamci_detect_irq, mmc);
757  
758  		if (host->power && host->pdata->setpower)
759  			dev_warn(dev, "gpio_power and setpower() both defined\n");
760  		if (host->use_ro_gpio && host->pdata->get_ro)
761  			dev_warn(dev, "gpio_ro and get_ro() both defined\n");
762  	}
763  
764  	ret = mmc_add_host(mmc);
765  	if (ret) {
766  		if (host->pdata && host->pdata->exit)
767  			host->pdata->exit(dev, mmc);
768  		goto out;
769  	}
770  
771  	return 0;
772  
773  out:
774  	if (host) {
775  		if (host->dma_chan_rx)
776  			dma_release_channel(host->dma_chan_rx);
777  		if (host->dma_chan_tx)
778  			dma_release_channel(host->dma_chan_tx);
779  	}
780  	if (mmc)
781  		mmc_free_host(mmc);
782  	return ret;
783  }
784  
785  static void pxamci_remove(struct platform_device *pdev)
786  {
787  	struct mmc_host *mmc = platform_get_drvdata(pdev);
788  
789  	if (mmc) {
790  		struct pxamci_host *host = mmc_priv(mmc);
791  
792  		mmc_remove_host(mmc);
793  
794  		if (host->pdata && host->pdata->exit)
795  			host->pdata->exit(&pdev->dev, mmc);
796  
797  		pxamci_stop_clock(host);
798  		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
799  		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
800  		       host->base + MMC_I_MASK);
801  
802  		dmaengine_terminate_all(host->dma_chan_rx);
803  		dmaengine_terminate_all(host->dma_chan_tx);
804  		dma_release_channel(host->dma_chan_rx);
805  		dma_release_channel(host->dma_chan_tx);
806  
807  		mmc_free_host(mmc);
808  	}
809  }
810  
811  static struct platform_driver pxamci_driver = {
812  	.probe		= pxamci_probe,
813  	.remove_new	= pxamci_remove,
814  	.driver		= {
815  		.name	= DRIVER_NAME,
816  		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
817  		.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
818  	},
819  };
820  
821  module_platform_driver(pxamci_driver);
822  
823  MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
824  MODULE_LICENSE("GPL");
825  MODULE_ALIAS("platform:pxa2xx-mci");
826