xref: /openbmc/linux/drivers/mmc/host/tmio_mmc_core.c (revision 86beb538)
1426e95d1SSimon Horman /*
2426e95d1SSimon Horman  * Driver for the MMC / SD / SDIO IP found in:
3426e95d1SSimon Horman  *
4426e95d1SSimon Horman  * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
5426e95d1SSimon Horman  *
6426e95d1SSimon Horman  * Copyright (C) 2016 Sang Engineering, Wolfram Sang
7426e95d1SSimon Horman  * Copyright (C) 2015-16 Renesas Electronics Corporation
8426e95d1SSimon Horman  * Copyright (C) 2011 Guennadi Liakhovetski
9426e95d1SSimon Horman  * Copyright (C) 2007 Ian Molton
10426e95d1SSimon Horman  * Copyright (C) 2004 Ian Molton
11426e95d1SSimon Horman  *
12426e95d1SSimon Horman  * This program is free software; you can redistribute it and/or modify
13426e95d1SSimon Horman  * it under the terms of the GNU General Public License version 2 as
14426e95d1SSimon Horman  * published by the Free Software Foundation.
15426e95d1SSimon Horman  *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
19426e95d1SSimon Horman  *
20426e95d1SSimon Horman  * TODO:
21426e95d1SSimon Horman  *   Investigate using a workqueue for PIO transfers
22426e95d1SSimon Horman  *   Eliminate FIXMEs
23426e95d1SSimon Horman  *   Better Power management
24426e95d1SSimon Horman  *   Handle MMC errors better
25426e95d1SSimon Horman  *   double buffer support
26426e95d1SSimon Horman  *
27426e95d1SSimon Horman  */
28426e95d1SSimon Horman 
29426e95d1SSimon Horman #include <linux/delay.h>
30426e95d1SSimon Horman #include <linux/device.h>
31426e95d1SSimon Horman #include <linux/highmem.h>
32426e95d1SSimon Horman #include <linux/interrupt.h>
33426e95d1SSimon Horman #include <linux/io.h>
34426e95d1SSimon Horman #include <linux/irq.h>
35426e95d1SSimon Horman #include <linux/mfd/tmio.h>
36426e95d1SSimon Horman #include <linux/mmc/card.h>
37426e95d1SSimon Horman #include <linux/mmc/host.h>
38426e95d1SSimon Horman #include <linux/mmc/mmc.h>
39426e95d1SSimon Horman #include <linux/mmc/slot-gpio.h>
40426e95d1SSimon Horman #include <linux/module.h>
41426e95d1SSimon Horman #include <linux/pagemap.h>
42426e95d1SSimon Horman #include <linux/platform_device.h>
43426e95d1SSimon Horman #include <linux/pm_qos.h>
44426e95d1SSimon Horman #include <linux/pm_runtime.h>
45426e95d1SSimon Horman #include <linux/regulator/consumer.h>
46426e95d1SSimon Horman #include <linux/mmc/sdio.h>
47426e95d1SSimon Horman #include <linux/scatterlist.h>
48426e95d1SSimon Horman #include <linux/spinlock.h>
49426e95d1SSimon Horman #include <linux/workqueue.h>
50426e95d1SSimon Horman 
51426e95d1SSimon Horman #include "tmio_mmc.h"
52426e95d1SSimon Horman 
53426e95d1SSimon Horman static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
54426e95d1SSimon Horman 				      struct mmc_data *data)
55426e95d1SSimon Horman {
56426e95d1SSimon Horman 	if (host->dma_ops)
57426e95d1SSimon Horman 		host->dma_ops->start(host, data);
58426e95d1SSimon Horman }
59426e95d1SSimon Horman 
60426e95d1SSimon Horman static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
61426e95d1SSimon Horman {
62426e95d1SSimon Horman 	if (host->dma_ops)
63426e95d1SSimon Horman 		host->dma_ops->enable(host, enable);
64426e95d1SSimon Horman }
65426e95d1SSimon Horman 
66426e95d1SSimon Horman static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
67426e95d1SSimon Horman 					struct tmio_mmc_data *pdata)
68426e95d1SSimon Horman {
69426e95d1SSimon Horman 	if (host->dma_ops) {
70426e95d1SSimon Horman 		host->dma_ops->request(host, pdata);
71426e95d1SSimon Horman 	} else {
72426e95d1SSimon Horman 		host->chan_tx = NULL;
73426e95d1SSimon Horman 		host->chan_rx = NULL;
74426e95d1SSimon Horman 	}
75426e95d1SSimon Horman }
76426e95d1SSimon Horman 
77426e95d1SSimon Horman static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
78426e95d1SSimon Horman {
79426e95d1SSimon Horman 	if (host->dma_ops)
80426e95d1SSimon Horman 		host->dma_ops->release(host);
81426e95d1SSimon Horman }
82426e95d1SSimon Horman 
83426e95d1SSimon Horman static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
84426e95d1SSimon Horman {
85426e95d1SSimon Horman 	if (host->dma_ops)
86426e95d1SSimon Horman 		host->dma_ops->abort(host);
87426e95d1SSimon Horman }
88426e95d1SSimon Horman 
89426e95d1SSimon Horman void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
90426e95d1SSimon Horman {
91426e95d1SSimon Horman 	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
92426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
93426e95d1SSimon Horman }
94426e95d1SSimon Horman EXPORT_SYMBOL(tmio_mmc_enable_mmc_irqs);
95426e95d1SSimon Horman 
96426e95d1SSimon Horman void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
97426e95d1SSimon Horman {
98426e95d1SSimon Horman 	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
99426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
100426e95d1SSimon Horman }
101426e95d1SSimon Horman EXPORT_SYMBOL(tmio_mmc_disable_mmc_irqs);
102426e95d1SSimon Horman 
103426e95d1SSimon Horman static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
104426e95d1SSimon Horman {
105426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
106426e95d1SSimon Horman }
107426e95d1SSimon Horman 
108426e95d1SSimon Horman static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
109426e95d1SSimon Horman {
110426e95d1SSimon Horman 	host->sg_len = data->sg_len;
111426e95d1SSimon Horman 	host->sg_ptr = data->sg;
112426e95d1SSimon Horman 	host->sg_orig = data->sg;
113426e95d1SSimon Horman 	host->sg_off = 0;
114426e95d1SSimon Horman }
115426e95d1SSimon Horman 
116426e95d1SSimon Horman static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
117426e95d1SSimon Horman {
118426e95d1SSimon Horman 	host->sg_ptr = sg_next(host->sg_ptr);
119426e95d1SSimon Horman 	host->sg_off = 0;
120426e95d1SSimon Horman 	return --host->sg_len;
121426e95d1SSimon Horman }
122426e95d1SSimon Horman 
#define CMDREQ_TIMEOUT	5000	/* ms to wait for a command IRQ before resetting */

#ifdef CONFIG_MMC_DEBUG

/*
 * Print the symbolic name of a TMIO_STAT_* bit if it is set in @status,
 * separating successive names with " | "; @i counts names printed so far.
 * NOTE(review): these printk() calls carry no KERN_CONT/log level, so on
 * kernels that require explicit continuation markers each fragment may
 * land on its own log line -- confirm before relying on the output format.
 */
#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

/* Decode a raw CTL_STATUS word into human-readable flag names (debug builds) */
static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
167426e95d1SSimon Horman 
/*
 * Enable or disable delivery of the SDIO card interrupt (mmc_host_ops
 * ->enable_sdio_irq). Pairs a runtime-PM reference with the enabled state
 * so the controller stays powered while the card may raise IRQs.
 */
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		/* Unmask only the IOIRQ bit; all other SDIO IRQs stay masked */
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		/* Mask everything, then drop the runtime-PM reference */
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}
198426e95d1SSimon Horman 
/*
 * Gate the SD card clock on. The delays after each write let the clock
 * settle; platforms flagged TMIO_MMC_MIN_RCAR2 need only 1 ms here.
 */
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);

	/* Variants with the "high" register block also gate via CLK_AND_WAIT */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}
210426e95d1SSimon Horman 
/*
 * Gate the SD card clock off, mirroring tmio_mmc_clk_start() in reverse
 * order: first the CLK_AND_WAIT gate (when present), then SCLKEN.
 */
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
}
222426e95d1SSimon Horman 
/*
 * Program the SD card clock divider so the card clock does not exceed
 * @new_clock, then (re)start the clock. @new_clock == 0 stops the clock.
 */
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
				unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	/* Base frequency: platform clock hook if set, else the host's f_min */
	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	/* Walk the divider bit down while doubling stays within new_clock */
	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
		clk |= 0xff;

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	/* Stop the clock before touching the divider, then restart it */
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		msleep(10);

	tmio_mmc_clk_start(host);
}
256426e95d1SSimon Horman 
/*
 * Soft-reset the SD core (and the SDIO core on variants that have the
 * "high" register block), then restore the SDIO IRQ configuration, which
 * the reset clobbers.
 */
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);	/* assert reset */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);	/* release reset */
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);

	/* Re-apply the SDIO IRQ mask lost across the reset */
	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}

}
275426e95d1SSimon Horman 
/*
 * Delayed-work handler fired when a request has not completed within
 * CMDREQ_TIMEOUT ms: fail the outstanding command/data with -ETIMEDOUT,
 * reset the controller, abort DMA and complete the request.
 */
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * is request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen, that a .set_ios() call preempts
	 * us, so, have to check for IS_ERR(host->mrq)
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	/* Propagate the timeout to whichever stage was in flight */
	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	/* Drop the lock before the (sleeping) reset and DMA abort below */
	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}
323426e95d1SSimon Horman 
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000 /* NB: same bit as NO_CMD12_ISSUE below */
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
337426e95d1SSimon Horman 
/*
 * Encode @cmd into the controller's command-register format and fire it
 * off. Returns 0 on success or -EINVAL for an unsupported response type.
 * CMD12 with arg 0 is short-circuited to the hardware's internal stop.
 */
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_STP);
		return 0;
	}

	/* Map the MMC-core response type onto the chip's RESP_* encoding */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggest this bit
 *         should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and SET_BLOCK_COUNT
			 * when doing multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	/* Without native hotplug, don't let card-detect IRQs through */
	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}
398426e95d1SSimon Horman 
/*
 * Move @count bytes between @buf and the controller's data-port FIFO,
 * using the 32-bit port when the platform supports it, else the 16-bit
 * port. Trailing bytes that do not fill a full port word are staged
 * through a small bounce array (32-bit path) or a single byte access
 * (16-bit path).
 */
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8  *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u8 data[4] = { };

		/* Bulk transfer in whole 32-bit words */
		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
					    count >> 2);

		/* if count was multiple of 4 */
		if (!(count & 0x3))
			return;

		/* Handle the 1-3 leftover bytes via the bounce array */
		buf8 = (u8 *)(buf + (count >> 2));
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
					   (u32 *)data, 1);
			memcpy(buf8, data, count);
		} else {
			memcpy(data, buf8, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
					    (u32 *)data, 1);
		}

		return;
	}

	/* 16-bit data port: bulk transfer in whole 16-bit words */
	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was even number */
	if (!(count & 0x1))
		return;

	/* if count was odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * driver and this function are assuming that
	 * it is used as little endian
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}
462426e95d1SSimon Horman 
463426e95d1SSimon Horman /*
464426e95d1SSimon Horman  * This chip always returns (at least?) as much data as you ask for.
465426e95d1SSimon Horman  * I'm unsure what happens if you ask for less than a block. This should be
466426e95d1SSimon Horman  * looked into to ensure that a funny length read doesn't hose the controller.
467426e95d1SSimon Horman  */
468426e95d1SSimon Horman static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
469426e95d1SSimon Horman {
470426e95d1SSimon Horman 	struct mmc_data *data = host->data;
471426e95d1SSimon Horman 	void *sg_virt;
472426e95d1SSimon Horman 	unsigned short *buf;
473426e95d1SSimon Horman 	unsigned int count;
474426e95d1SSimon Horman 	unsigned long flags;
475426e95d1SSimon Horman 
476426e95d1SSimon Horman 	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
477426e95d1SSimon Horman 		pr_err("PIO IRQ in DMA mode!\n");
478426e95d1SSimon Horman 		return;
479426e95d1SSimon Horman 	} else if (!data) {
480426e95d1SSimon Horman 		pr_debug("Spurious PIO IRQ\n");
481426e95d1SSimon Horman 		return;
482426e95d1SSimon Horman 	}
483426e95d1SSimon Horman 
484426e95d1SSimon Horman 	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
485426e95d1SSimon Horman 	buf = (unsigned short *)(sg_virt + host->sg_off);
486426e95d1SSimon Horman 
487426e95d1SSimon Horman 	count = host->sg_ptr->length - host->sg_off;
488426e95d1SSimon Horman 	if (count > data->blksz)
489426e95d1SSimon Horman 		count = data->blksz;
490426e95d1SSimon Horman 
491426e95d1SSimon Horman 	pr_debug("count: %08x offset: %08x flags %08x\n",
492426e95d1SSimon Horman 		 count, host->sg_off, data->flags);
493426e95d1SSimon Horman 
494426e95d1SSimon Horman 	/* Transfer the data */
495426e95d1SSimon Horman 	tmio_mmc_transfer_data(host, buf, count);
496426e95d1SSimon Horman 
497426e95d1SSimon Horman 	host->sg_off += count;
498426e95d1SSimon Horman 
499426e95d1SSimon Horman 	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
500426e95d1SSimon Horman 
501426e95d1SSimon Horman 	if (host->sg_off == host->sg_ptr->length)
502426e95d1SSimon Horman 		tmio_mmc_next_sg(host);
503426e95d1SSimon Horman 
504426e95d1SSimon Horman 	return;
505426e95d1SSimon Horman }
506426e95d1SSimon Horman 
507426e95d1SSimon Horman static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
508426e95d1SSimon Horman {
509426e95d1SSimon Horman 	if (host->sg_ptr == &host->bounce_sg) {
510426e95d1SSimon Horman 		unsigned long flags;
511426e95d1SSimon Horman 		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
512426e95d1SSimon Horman 		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
513426e95d1SSimon Horman 		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
514426e95d1SSimon Horman 	}
515426e95d1SSimon Horman }
516426e95d1SSimon Horman 
517426e95d1SSimon Horman /* needs to be called with host->lock held */
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		/* Copy DMA reads back out of the bounce buffer if one was used */
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	/* With no SBC, the hardware issued auto CMD12; report its response */
	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* fill in response from auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	/* Finish the request from process context (host->done work) */
	schedule_work(&host->done);
}
EXPORT_SYMBOL(tmio_mmc_do_data_irq);
572426e95d1SSimon Horman 
/*
 * DATAEND interrupt handler. For PIO transfers, completes the data stage
 * directly; for DMA transfers, signals dma_dataend so the DMA completion
 * path can finish the request. Flags CRC/stopbit/underrun errors first.
 */
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed,
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set, in this cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			complete(&host->dma_dataend);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		/* DMA read: hand completion off to the DMA path */
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		complete(&host->dma_dataend);
	} else {
		/* PIO: the data stage is done; complete it right here */
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}
619426e95d1SSimon Horman 
/*
 * Handle a command-phase interrupt: read back the response registers,
 * record any command error from @stat, and either arm the data phase
 * (PIO IRQs or DMA issue) or complete the request via the done work.
 */
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	/* Read the raw response words back-to-front from the controller */
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags &  MMC_RSP_136) {
		/* Long (136-bit) response: shift the whole thing left by 8 */
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		/* Short response: the valid word ended up in the last slot */
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}
680426e95d1SSimon Horman 
681426e95d1SSimon Horman static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
682426e95d1SSimon Horman 				      int ireg, int status)
683426e95d1SSimon Horman {
684426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
685426e95d1SSimon Horman 
686426e95d1SSimon Horman 	/* Card insert / remove attempts */
687426e95d1SSimon Horman 	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
688426e95d1SSimon Horman 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
689426e95d1SSimon Horman 			TMIO_STAT_CARD_REMOVE);
690426e95d1SSimon Horman 		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
691426e95d1SSimon Horman 		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
692426e95d1SSimon Horman 		    !work_pending(&mmc->detect.work))
693426e95d1SSimon Horman 			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
694426e95d1SSimon Horman 		return true;
695426e95d1SSimon Horman 	}
696426e95d1SSimon Horman 
697426e95d1SSimon Horman 	return false;
698426e95d1SSimon Horman }
699426e95d1SSimon Horman 
700426e95d1SSimon Horman static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
701426e95d1SSimon Horman 				 int ireg, int status)
702426e95d1SSimon Horman {
703426e95d1SSimon Horman 	/* Command completion */
704426e95d1SSimon Horman 	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
705426e95d1SSimon Horman 		tmio_mmc_ack_mmc_irqs(host,
706426e95d1SSimon Horman 			     TMIO_STAT_CMDRESPEND |
707426e95d1SSimon Horman 			     TMIO_STAT_CMDTIMEOUT);
708426e95d1SSimon Horman 		tmio_mmc_cmd_irq(host, status);
709426e95d1SSimon Horman 		return true;
710426e95d1SSimon Horman 	}
711426e95d1SSimon Horman 
712426e95d1SSimon Horman 	/* Data transfer */
713426e95d1SSimon Horman 	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
714426e95d1SSimon Horman 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
715426e95d1SSimon Horman 		tmio_mmc_pio_irq(host);
716426e95d1SSimon Horman 		return true;
717426e95d1SSimon Horman 	}
718426e95d1SSimon Horman 
719426e95d1SSimon Horman 	/* Data transfer completion */
720426e95d1SSimon Horman 	if (ireg & TMIO_STAT_DATAEND) {
721426e95d1SSimon Horman 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
722426e95d1SSimon Horman 		tmio_mmc_data_irq(host, status);
723426e95d1SSimon Horman 		return true;
724426e95d1SSimon Horman 	}
725426e95d1SSimon Horman 
726426e95d1SSimon Horman 	return false;
727426e95d1SSimon Horman }
728426e95d1SSimon Horman 
729426e95d1SSimon Horman static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
730426e95d1SSimon Horman {
731426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
732426e95d1SSimon Horman 	struct tmio_mmc_data *pdata = host->pdata;
733426e95d1SSimon Horman 	unsigned int ireg, status;
734426e95d1SSimon Horman 	unsigned int sdio_status;
735426e95d1SSimon Horman 
736426e95d1SSimon Horman 	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
737426e95d1SSimon Horman 		return;
738426e95d1SSimon Horman 
739426e95d1SSimon Horman 	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
740426e95d1SSimon Horman 	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
741426e95d1SSimon Horman 
742426e95d1SSimon Horman 	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
743426e95d1SSimon Horman 	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
744426e95d1SSimon Horman 		sdio_status |= TMIO_SDIO_SETBITS_MASK;
745426e95d1SSimon Horman 
746426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
747426e95d1SSimon Horman 
748426e95d1SSimon Horman 	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
749426e95d1SSimon Horman 		mmc_signal_sdio_irq(mmc);
750426e95d1SSimon Horman }
751426e95d1SSimon Horman 
/*
 * Top-level interrupt handler: reads and acks the status register, then
 * dispatches to the card-detect, SD-card and SDIO sub-handlers in turn.
 */
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	/* Only consider sources that are unmasked for this host */
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	__tmio_mmc_sdio_irq(host);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);
776426e95d1SSimon Horman 
/*
 * Prepare a data transfer: validate the block size against the current
 * bus width, set up the scatter-gather state, program block length and
 * count into the controller, and kick off DMA (when available).
 * Returns 0 on success or -EINVAL for an unsupported block size.
 */
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}
808426e95d1SSimon Horman 
809426e95d1SSimon Horman static void tmio_mmc_hw_reset(struct mmc_host *mmc)
810426e95d1SSimon Horman {
811426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
812426e95d1SSimon Horman 
813426e95d1SSimon Horman 	if (host->hw_reset)
814426e95d1SSimon Horman 		host->hw_reset(host);
815426e95d1SSimon Horman }
816426e95d1SSimon Horman 
/*
 * Run the tuning procedure: issue the tuning command twice per tap,
 * record which attempts succeeded in host->taps, then let the variant's
 * select_tuning() hook pick the best tap. On failure the controller is
 * reset via tmio_mmc_hw_reset(). Returns 0 on success or a negative
 * error code (also 0 when tuning is simply not supported).
 */
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->init_tuning || !host->select_tuning)
		/* Tuning is not supported */
		goto out;

	host->tap_num = host->init_tuning(host);
	if (!host->tap_num)
		/* Tuning is not supported */
		goto out;

	/* The taps bitmap must be able to hold two passes over all taps */
	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
		      "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue CMD19 twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret && ret != -EILSEQ)
			goto out;
		if (ret == 0)
			set_bit(i, host->taps);

		mdelay(1);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}
863426e95d1SSimon Horman 
/*
 * Issue the next command of @mrq: SET_BLOCK_COUNT (sbc) first when
 * present and not yet completed, otherwise the main command (starting
 * the data phase beforehand when the request carries data). Arms the
 * delayed-reset watchdog on success; fails the request on error.
 */
static void tmio_process_mrq(struct tmio_mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	/* host->cmd == mrq->sbc means sbc was just completed; send cmd now */
	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	/* Watchdog: recover via delayed_reset_work if the request stalls */
	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}
894de2a6bb9SWolfram Sang 
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		/* An ERR_PTR in host->mrq means .set_ios() owns the host */
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	/* Record the start time for the stuck-request debug message */
	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}
921426e95d1SSimon Horman 
/*
 * Complete the request in host->mrq: clear the per-request state, stop
 * the reset watchdog, abort DMA on error and report the result to the
 * MMC core. When only the SET_BLOCK_COUNT part has finished, continue
 * with the main command instead of completing.
 */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	/* NULL: nothing in flight; ERR_PTR: .set_ios() owns the host */
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->force_pio = false;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error)
		host->check_scc_error(host);

	/* If SET_BLOCK_COUNT, continue with main command */
	if (host->mrq) {
		tmio_process_mrq(host, mrq);
		return;
	}

	mmc_request_done(host->mmc, mrq);
}
961f5fdcd1dSWolfram Sang 
962f5fdcd1dSWolfram Sang static void tmio_mmc_done_work(struct work_struct *work)
963f5fdcd1dSWolfram Sang {
964f5fdcd1dSWolfram Sang 	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
965f5fdcd1dSWolfram Sang 						  done);
966f5fdcd1dSWolfram Sang 	tmio_mmc_finish_request(host);
967f5fdcd1dSWolfram Sang }
968f5fdcd1dSWolfram Sang 
969426e95d1SSimon Horman static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
970426e95d1SSimon Horman {
971426e95d1SSimon Horman 	if (!host->clk_enable)
972426e95d1SSimon Horman 		return -ENOTSUPP;
973426e95d1SSimon Horman 
974426e95d1SSimon Horman 	return host->clk_enable(host);
975426e95d1SSimon Horman }
976426e95d1SSimon Horman 
977426e95d1SSimon Horman static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
978426e95d1SSimon Horman {
979426e95d1SSimon Horman 	if (host->clk_disable)
980426e95d1SSimon Horman 		host->clk_disable(host);
981426e95d1SSimon Horman }
982426e95d1SSimon Horman 
/*
 * Power the card up: platform hook first, then Vcc, then VccQ — the
 * ordering and delays are empirical (see comments below). Errors can
 * only be logged since .set_ios() returns void.
 */
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems, VccQ should be switched on after Vcc, this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}
1016426e95d1SSimon Horman 
/*
 * Power the card down in the reverse order of tmio_mmc_power_on():
 * VccQ first, then Vcc, then the platform power hook.
 */
static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}
1030426e95d1SSimon Horman 
1031426e95d1SSimon Horman static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
1032426e95d1SSimon Horman 				unsigned char bus_width)
1033426e95d1SSimon Horman {
1034426e95d1SSimon Horman 	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
1035426e95d1SSimon Horman 				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);
1036426e95d1SSimon Horman 
1037426e95d1SSimon Horman 	/* reg now applies to MMC_BUS_WIDTH_4 */
1038426e95d1SSimon Horman 	if (bus_width == MMC_BUS_WIDTH_1)
1039426e95d1SSimon Horman 		reg |= CARD_OPT_WIDTH;
1040426e95d1SSimon Horman 	else if (bus_width == MMC_BUS_WIDTH_8)
1041426e95d1SSimon Horman 		reg |= CARD_OPT_WIDTH8;
1042426e95d1SSimon Horman 
1043426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
1044426e95d1SSimon Horman }
1045426e95d1SSimon Horman 
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	/* Do not touch the hardware while a request is in flight */
	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			/* Another .set_ios() is already in progress */
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	/* Mark the host busy so tmio_mmc_request() backs off meanwhile */
	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	/* Remember the last requested clock rate */
	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}
1113426e95d1SSimon Horman 
1114426e95d1SSimon Horman static int tmio_mmc_get_ro(struct mmc_host *mmc)
1115426e95d1SSimon Horman {
1116426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
1117426e95d1SSimon Horman 	struct tmio_mmc_data *pdata = host->pdata;
1118426e95d1SSimon Horman 	int ret = mmc_gpio_get_ro(mmc);
1119426e95d1SSimon Horman 	if (ret >= 0)
1120426e95d1SSimon Horman 		return ret;
1121426e95d1SSimon Horman 
1122426e95d1SSimon Horman 	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
1123426e95d1SSimon Horman 		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
1124426e95d1SSimon Horman 
1125426e95d1SSimon Horman 	return ret;
1126426e95d1SSimon Horman }
1127426e95d1SSimon Horman 
1128426e95d1SSimon Horman static int tmio_multi_io_quirk(struct mmc_card *card,
1129426e95d1SSimon Horman 			       unsigned int direction, int blk_size)
1130426e95d1SSimon Horman {
1131426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(card->host);
1132426e95d1SSimon Horman 
1133426e95d1SSimon Horman 	if (host->multi_io_quirk)
1134426e95d1SSimon Horman 		return host->multi_io_quirk(card, direction, blk_size);
1135426e95d1SSimon Horman 
1136426e95d1SSimon Horman 	return blk_size;
1137426e95d1SSimon Horman }
1138426e95d1SSimon Horman 
/*
 * Host operations. Deliberately not const: .card_busy and
 * .start_signal_voltage_switch are patched in from the variant host at
 * probe time (see tmio_mmc_host_probe()).
 */
static struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro         = tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
	.hw_reset	= tmio_mmc_hw_reset,
	.execute_tuning = tmio_mmc_execute_tuning,
};
1149426e95d1SSimon Horman 
1150426e95d1SSimon Horman static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
1151426e95d1SSimon Horman {
1152426e95d1SSimon Horman 	struct tmio_mmc_data *pdata = host->pdata;
1153426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
1154426e95d1SSimon Horman 
1155426e95d1SSimon Horman 	mmc_regulator_get_supply(mmc);
1156426e95d1SSimon Horman 
1157426e95d1SSimon Horman 	/* use ocr_mask if no regulator */
1158426e95d1SSimon Horman 	if (!mmc->ocr_avail)
1159426e95d1SSimon Horman 		mmc->ocr_avail =  pdata->ocr_mask;
1160426e95d1SSimon Horman 
1161426e95d1SSimon Horman 	/*
1162426e95d1SSimon Horman 	 * try again.
1163426e95d1SSimon Horman 	 * There is possibility that regulator has not been probed
1164426e95d1SSimon Horman 	 */
1165426e95d1SSimon Horman 	if (!mmc->ocr_avail)
1166426e95d1SSimon Horman 		return -EPROBE_DEFER;
1167426e95d1SSimon Horman 
1168426e95d1SSimon Horman 	return 0;
1169426e95d1SSimon Horman }
1170426e95d1SSimon Horman 
1171426e95d1SSimon Horman static void tmio_mmc_of_parse(struct platform_device *pdev,
1172426e95d1SSimon Horman 			      struct tmio_mmc_data *pdata)
1173426e95d1SSimon Horman {
1174426e95d1SSimon Horman 	const struct device_node *np = pdev->dev.of_node;
1175426e95d1SSimon Horman 	if (!np)
1176426e95d1SSimon Horman 		return;
1177426e95d1SSimon Horman 
1178426e95d1SSimon Horman 	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
1179426e95d1SSimon Horman 		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
1180426e95d1SSimon Horman }
1181426e95d1SSimon Horman 
1182426e95d1SSimon Horman struct tmio_mmc_host*
1183426e95d1SSimon Horman tmio_mmc_host_alloc(struct platform_device *pdev)
1184426e95d1SSimon Horman {
1185426e95d1SSimon Horman 	struct tmio_mmc_host *host;
1186426e95d1SSimon Horman 	struct mmc_host *mmc;
1187426e95d1SSimon Horman 
1188426e95d1SSimon Horman 	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
1189426e95d1SSimon Horman 	if (!mmc)
1190426e95d1SSimon Horman 		return NULL;
1191426e95d1SSimon Horman 
1192426e95d1SSimon Horman 	host = mmc_priv(mmc);
1193426e95d1SSimon Horman 	host->mmc = mmc;
1194426e95d1SSimon Horman 	host->pdev = pdev;
1195426e95d1SSimon Horman 
1196426e95d1SSimon Horman 	return host;
1197426e95d1SSimon Horman }
1198426e95d1SSimon Horman EXPORT_SYMBOL(tmio_mmc_host_alloc);
1199426e95d1SSimon Horman 
/* Release a host obtained from tmio_mmc_host_alloc(). */
void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_free);
1205426e95d1SSimon Horman 
1206426e95d1SSimon Horman int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1207426e95d1SSimon Horman 			struct tmio_mmc_data *pdata,
1208426e95d1SSimon Horman 			const struct tmio_mmc_dma_ops *dma_ops)
1209426e95d1SSimon Horman {
1210426e95d1SSimon Horman 	struct platform_device *pdev = _host->pdev;
1211426e95d1SSimon Horman 	struct mmc_host *mmc = _host->mmc;
1212426e95d1SSimon Horman 	struct resource *res_ctl;
1213426e95d1SSimon Horman 	int ret;
1214426e95d1SSimon Horman 	u32 irq_mask = TMIO_MASK_CMD;
1215426e95d1SSimon Horman 
1216426e95d1SSimon Horman 	tmio_mmc_of_parse(pdev, pdata);
1217426e95d1SSimon Horman 
1218426e95d1SSimon Horman 	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
1219426e95d1SSimon Horman 		_host->write16_hook = NULL;
1220426e95d1SSimon Horman 
1221426e95d1SSimon Horman 	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1222426e95d1SSimon Horman 	if (!res_ctl)
1223426e95d1SSimon Horman 		return -EINVAL;
1224426e95d1SSimon Horman 
1225426e95d1SSimon Horman 	ret = mmc_of_parse(mmc);
1226426e95d1SSimon Horman 	if (ret < 0)
1227426e95d1SSimon Horman 		return ret;
1228426e95d1SSimon Horman 
1229426e95d1SSimon Horman 	_host->pdata = pdata;
1230426e95d1SSimon Horman 	platform_set_drvdata(pdev, mmc);
1231426e95d1SSimon Horman 
1232426e95d1SSimon Horman 	_host->set_pwr = pdata->set_pwr;
1233426e95d1SSimon Horman 	_host->set_clk_div = pdata->set_clk_div;
1234426e95d1SSimon Horman 
1235426e95d1SSimon Horman 	ret = tmio_mmc_init_ocr(_host);
1236426e95d1SSimon Horman 	if (ret < 0)
1237426e95d1SSimon Horman 		return ret;
1238426e95d1SSimon Horman 
1239426e95d1SSimon Horman 	_host->ctl = devm_ioremap(&pdev->dev,
1240426e95d1SSimon Horman 				  res_ctl->start, resource_size(res_ctl));
1241426e95d1SSimon Horman 	if (!_host->ctl)
1242426e95d1SSimon Horman 		return -ENOMEM;
1243426e95d1SSimon Horman 
1244426e95d1SSimon Horman 	tmio_mmc_ops.card_busy = _host->card_busy;
1245426e95d1SSimon Horman 	tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
1246426e95d1SSimon Horman 	mmc->ops = &tmio_mmc_ops;
1247426e95d1SSimon Horman 
1248426e95d1SSimon Horman 	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
1249426e95d1SSimon Horman 	mmc->caps2 |= pdata->capabilities2;
1250426e95d1SSimon Horman 	mmc->max_segs = 32;
1251426e95d1SSimon Horman 	mmc->max_blk_size = 512;
1252426e95d1SSimon Horman 	mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
1253426e95d1SSimon Horman 		mmc->max_segs;
1254426e95d1SSimon Horman 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1255426e95d1SSimon Horman 	mmc->max_seg_size = mmc->max_req_size;
1256426e95d1SSimon Horman 
1257426e95d1SSimon Horman 	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
1258426e95d1SSimon Horman 				  mmc->caps & MMC_CAP_NEEDS_POLL ||
1259426e95d1SSimon Horman 				  !mmc_card_is_removable(mmc));
1260426e95d1SSimon Horman 
1261426e95d1SSimon Horman 	/*
1262426e95d1SSimon Horman 	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
1263426e95d1SSimon Horman 	 * hotplug gets disabled. It seems RuntimePM related yet we need further
1264426e95d1SSimon Horman 	 * research. Since we are planning a PM overhaul anyway, let's enforce
1265426e95d1SSimon Horman 	 * for now the device being active by enabling native hotplug always.
1266426e95d1SSimon Horman 	 */
1267426e95d1SSimon Horman 	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
1268426e95d1SSimon Horman 		_host->native_hotplug = true;
1269426e95d1SSimon Horman 
1270426e95d1SSimon Horman 	if (tmio_mmc_clk_enable(_host) < 0) {
1271426e95d1SSimon Horman 		mmc->f_max = pdata->hclk;
1272426e95d1SSimon Horman 		mmc->f_min = mmc->f_max / 512;
1273426e95d1SSimon Horman 	}
1274426e95d1SSimon Horman 
1275426e95d1SSimon Horman 	/*
1276426e95d1SSimon Horman 	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
1277426e95d1SSimon Horman 	 * looping forever...
1278426e95d1SSimon Horman 	 */
1279426e95d1SSimon Horman 	if (mmc->f_min == 0)
1280426e95d1SSimon Horman 		return -EINVAL;
1281426e95d1SSimon Horman 
1282426e95d1SSimon Horman 	/*
1283426e95d1SSimon Horman 	 * While using internal tmio hardware logic for card detection, we need
1284426e95d1SSimon Horman 	 * to ensure it stays powered for it to work.
1285426e95d1SSimon Horman 	 */
1286426e95d1SSimon Horman 	if (_host->native_hotplug)
1287426e95d1SSimon Horman 		pm_runtime_get_noresume(&pdev->dev);
1288426e95d1SSimon Horman 
128986beb538SWolfram Sang 	_host->sdio_irq_enabled = false;
129086beb538SWolfram Sang 	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
129186beb538SWolfram Sang 		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
129286beb538SWolfram Sang 
1293426e95d1SSimon Horman 	tmio_mmc_clk_stop(_host);
1294426e95d1SSimon Horman 	tmio_mmc_reset(_host);
1295426e95d1SSimon Horman 
1296426e95d1SSimon Horman 	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
1297426e95d1SSimon Horman 	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
1298426e95d1SSimon Horman 
1299426e95d1SSimon Horman 	/* Unmask the IRQs we want to know about */
1300426e95d1SSimon Horman 	if (!_host->chan_rx)
1301426e95d1SSimon Horman 		irq_mask |= TMIO_MASK_READOP;
1302426e95d1SSimon Horman 	if (!_host->chan_tx)
1303426e95d1SSimon Horman 		irq_mask |= TMIO_MASK_WRITEOP;
1304426e95d1SSimon Horman 	if (!_host->native_hotplug)
1305426e95d1SSimon Horman 		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
1306426e95d1SSimon Horman 
1307426e95d1SSimon Horman 	_host->sdcard_irq_mask &= ~irq_mask;
1308426e95d1SSimon Horman 
1309426e95d1SSimon Horman 	spin_lock_init(&_host->lock);
1310426e95d1SSimon Horman 	mutex_init(&_host->ios_lock);
1311426e95d1SSimon Horman 
1312426e95d1SSimon Horman 	/* Init delayed work for request timeouts */
1313426e95d1SSimon Horman 	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
1314426e95d1SSimon Horman 	INIT_WORK(&_host->done, tmio_mmc_done_work);
1315426e95d1SSimon Horman 
1316426e95d1SSimon Horman 	/* See if we also get DMA */
1317426e95d1SSimon Horman 	_host->dma_ops = dma_ops;
1318426e95d1SSimon Horman 	tmio_mmc_request_dma(_host, pdata);
1319426e95d1SSimon Horman 
1320426e95d1SSimon Horman 	pm_runtime_set_active(&pdev->dev);
1321426e95d1SSimon Horman 	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1322426e95d1SSimon Horman 	pm_runtime_use_autosuspend(&pdev->dev);
1323426e95d1SSimon Horman 	pm_runtime_enable(&pdev->dev);
1324426e95d1SSimon Horman 
1325426e95d1SSimon Horman 	ret = mmc_add_host(mmc);
1326426e95d1SSimon Horman 	if (ret < 0) {
1327426e95d1SSimon Horman 		tmio_mmc_host_remove(_host);
1328426e95d1SSimon Horman 		return ret;
1329426e95d1SSimon Horman 	}
1330426e95d1SSimon Horman 
1331426e95d1SSimon Horman 	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
1332426e95d1SSimon Horman 
1333426e95d1SSimon Horman 	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
1334426e95d1SSimon Horman 		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
1335426e95d1SSimon Horman 		if (ret < 0) {
1336426e95d1SSimon Horman 			tmio_mmc_host_remove(_host);
1337426e95d1SSimon Horman 			return ret;
1338426e95d1SSimon Horman 		}
1339426e95d1SSimon Horman 		mmc_gpiod_request_cd_irq(mmc);
1340426e95d1SSimon Horman 	}
1341426e95d1SSimon Horman 
1342426e95d1SSimon Horman 	return 0;
1343426e95d1SSimon Horman }
1344426e95d1SSimon Horman EXPORT_SYMBOL(tmio_mmc_host_probe);
1345426e95d1SSimon Horman 
1346426e95d1SSimon Horman void tmio_mmc_host_remove(struct tmio_mmc_host *host)
1347426e95d1SSimon Horman {
1348426e95d1SSimon Horman 	struct platform_device *pdev = host->pdev;
1349426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
1350426e95d1SSimon Horman 
1351426e95d1SSimon Horman 	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
1352426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
1353426e95d1SSimon Horman 
1354426e95d1SSimon Horman 	if (!host->native_hotplug)
1355426e95d1SSimon Horman 		pm_runtime_get_sync(&pdev->dev);
1356426e95d1SSimon Horman 
1357426e95d1SSimon Horman 	dev_pm_qos_hide_latency_limit(&pdev->dev);
1358426e95d1SSimon Horman 
1359426e95d1SSimon Horman 	mmc_remove_host(mmc);
1360426e95d1SSimon Horman 	cancel_work_sync(&host->done);
1361426e95d1SSimon Horman 	cancel_delayed_work_sync(&host->delayed_reset_work);
1362426e95d1SSimon Horman 	tmio_mmc_release_dma(host);
1363426e95d1SSimon Horman 
1364426e95d1SSimon Horman 	pm_runtime_put_sync(&pdev->dev);
1365426e95d1SSimon Horman 	pm_runtime_disable(&pdev->dev);
1366426e95d1SSimon Horman 
1367426e95d1SSimon Horman 	tmio_mmc_clk_disable(host);
1368426e95d1SSimon Horman }
1369426e95d1SSimon Horman EXPORT_SYMBOL(tmio_mmc_host_remove);
1370426e95d1SSimon Horman 
1371426e95d1SSimon Horman #ifdef CONFIG_PM
1372426e95d1SSimon Horman int tmio_mmc_host_runtime_suspend(struct device *dev)
1373426e95d1SSimon Horman {
1374426e95d1SSimon Horman 	struct mmc_host *mmc = dev_get_drvdata(dev);
1375426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
1376426e95d1SSimon Horman 
1377426e95d1SSimon Horman 	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
1378426e95d1SSimon Horman 
1379426e95d1SSimon Horman 	if (host->clk_cache)
1380426e95d1SSimon Horman 		tmio_mmc_clk_stop(host);
1381426e95d1SSimon Horman 
1382426e95d1SSimon Horman 	tmio_mmc_clk_disable(host);
1383426e95d1SSimon Horman 
1384426e95d1SSimon Horman 	return 0;
1385426e95d1SSimon Horman }
1386426e95d1SSimon Horman EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
1387426e95d1SSimon Horman 
1388426e95d1SSimon Horman static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
1389426e95d1SSimon Horman {
1390426e95d1SSimon Horman 	return host->tap_num && mmc_can_retune(host->mmc);
1391426e95d1SSimon Horman }
1392426e95d1SSimon Horman 
1393426e95d1SSimon Horman int tmio_mmc_host_runtime_resume(struct device *dev)
1394426e95d1SSimon Horman {
1395426e95d1SSimon Horman 	struct mmc_host *mmc = dev_get_drvdata(dev);
1396426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
1397426e95d1SSimon Horman 
1398426e95d1SSimon Horman 	tmio_mmc_reset(host);
1399426e95d1SSimon Horman 	tmio_mmc_clk_enable(host);
1400426e95d1SSimon Horman 
1401426e95d1SSimon Horman 	if (host->clk_cache)
1402426e95d1SSimon Horman 		tmio_mmc_set_clock(host, host->clk_cache);
1403426e95d1SSimon Horman 
1404426e95d1SSimon Horman 	tmio_mmc_enable_dma(host, true);
1405426e95d1SSimon Horman 
1406426e95d1SSimon Horman 	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
1407426e95d1SSimon Horman 		dev_warn(&host->pdev->dev, "Tuning selection failed\n");
1408426e95d1SSimon Horman 
1409426e95d1SSimon Horman 	return 0;
1410426e95d1SSimon Horman }
1411426e95d1SSimon Horman EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
1412426e95d1SSimon Horman #endif
1413426e95d1SSimon Horman 
1414426e95d1SSimon Horman MODULE_LICENSE("GPL v2");
1415