/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

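/*
 * Default maximum operating frequency in Hz: used for mmc->f_max unless the
 * platform data supplies its own f_max (see mmci_probe() below). It can be
 * overridden at load time through the read-only 'fmax' module parameter
 * declared at the bottom of this file.
 */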
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size occupies bits 16..30 of the
 *		      datactrl register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
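		/*
		 * Worked example (not from the original source): with
		 * mclk = 100 MHz and desired = 400 kHz, the ST divider gives
		 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248 and
		 * cclk = 100000000 / (248 + 2) = 400 kHz, while the ARM
		 * divider gives clkdiv = 100000000 / (2 * 400000) - 1 = 124
		 * and cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
		 */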

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	pm_runtime_put(mmc_dev(host->mmc));
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

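	/*
	 * On variants with a single combined interrupt line, the MASK1
	 * (PIO) interrupt sources are delivered through IRQ0 as well, so
	 * mirror the requested bits into the MASK1 field of MMCIMASK0.
	 */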
	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;
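	/*
	 * SG_MITER_ATOMIC: the iterator is driven from mmci_pio_irq() with
	 * interrupts disabled, so the buffers must be mapped with the
	 * atomic kmap variants and must never sleep.
	 */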

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally; however, if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in or near __devinit and __devexit, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * The DMA controller cannot cope with scatter-gather on this
	 * transfer, so give up on DMA and fall back to PIO mode for
	 * subsequent requests.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
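	/*
	 * Both src_addr and dst_addr point at the FIFO; the DMA engine
	 * only looks at the half that matches conf.direction, which is
	 * filled in below once the transfer direction is known.
	 */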
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* Only prepare for DMA if a channel exists for this direction */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}
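
/*
 * Together, mmci_pre_request() and mmci_post_request() implement the MMC
 * core's asynchronous request preparation: the next transfer's scatterlist
 * is DMA-mapped and its descriptor prepared while the current transfer is
 * still in flight, and data->host_cookie (kept positive, wrapping back to
 * 1 on overflow) marks data that has already been prepared.
 */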

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* Only clean up if the transfer was set up for DMA */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(chan->device->dev, data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
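	/*
	 * Worked example (illustration only): a standard 512-byte block
	 * gives blksz_bits = ffs(512) - 1 = 9, so the classic variants
	 * encode log2(blksz) = 9 into datactrl bits 7..4, while ux500v2
	 * puts the raw 512 into bits 16 and up.
	 */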

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/*
	 * Attempt to use DMA operation mode; if this
	 * fails, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop)
			mmci_request_end(host, data->mrq);
		else
			mmci_start_command(host, data->stop, 0);
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

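	/*
	 * MMCIFIFOCNT counts the words still to be transferred on the card
	 * side, so host_remain minus (FIFOCNT << 2) is the number of bytes
	 * already sitting in the FIFO and readable right now.
	 */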
	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio &&
		    mmc_card_sdio(host->mmc->card)) {
			if (count < 8)
				writel(readl(host->base + MMCICLOCK) &
					~variant->clkreg_enable,
					host->base + MMCICLOCK);
			else
				writel(readl(host->base + MMCICLOCK) |
					variant->clkreg_enable,
					host->base + MMCICLOCK);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes, etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare, so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/*
	 * We can do scatter/gather I/O
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;
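	/*
	 * Worked numbers (illustration only): with the 24-bit ux500
	 * datalength register, max_req_size = 2^24 - 1 = 16777215 bytes,
	 * so max_blk_count = 16777215 >> 11 = 8191 maximum-size blocks.
	 */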

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
				mmci_cd_irq,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_unprep:
	clk_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);
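/* Describing the parameter for modinfo; this wording is illustrative only */
MODULE_PARM_DESC(fmax, "maximum operating frequency in Hz (default 515633)");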

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");