xref: /openbmc/linux/drivers/mmc/host/tmio_mmc_core.c (revision b12a7a28)
1426e95d1SSimon Horman /*
2426e95d1SSimon Horman  * Driver for the MMC / SD / SDIO IP found in:
3426e95d1SSimon Horman  *
4426e95d1SSimon Horman  * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
5426e95d1SSimon Horman  *
687317c4dSSimon Horman  * Copyright (C) 2015-17 Renesas Electronics Corporation
787317c4dSSimon Horman  * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
887317c4dSSimon Horman  * Copyright (C) 2017 Horms Solutions, Simon Horman
9426e95d1SSimon Horman  * Copyright (C) 2011 Guennadi Liakhovetski
10426e95d1SSimon Horman  * Copyright (C) 2007 Ian Molton
11426e95d1SSimon Horman  * Copyright (C) 2004 Ian Molton
12426e95d1SSimon Horman  *
13426e95d1SSimon Horman  * This program is free software; you can redistribute it and/or modify
14426e95d1SSimon Horman  * it under the terms of the GNU General Public License version 2 as
15426e95d1SSimon Horman  * published by the Free Software Foundation.
16426e95d1SSimon Horman  *
17426e95d1SSimon Horman  * This driver draws mainly on scattered spec sheets, reverse engineering
18426e95d1SSimon Horman  * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4-bit
19426e95d1SSimon Horman  * support). (Further 4-bit support from a later datasheet).
20426e95d1SSimon Horman  *
21426e95d1SSimon Horman  * TODO:
22426e95d1SSimon Horman  *   Investigate using a workqueue for PIO transfers
23426e95d1SSimon Horman  *   Eliminate FIXMEs
24426e95d1SSimon Horman  *   Better power management
25426e95d1SSimon Horman  *   Handle MMC errors better
26426e95d1SSimon Horman  *   Double buffer support
27426e95d1SSimon Horman  *
28426e95d1SSimon Horman  */
29426e95d1SSimon Horman 
30426e95d1SSimon Horman #include <linux/delay.h>
31426e95d1SSimon Horman #include <linux/device.h>
32426e95d1SSimon Horman #include <linux/highmem.h>
33426e95d1SSimon Horman #include <linux/interrupt.h>
34426e95d1SSimon Horman #include <linux/io.h>
35426e95d1SSimon Horman #include <linux/irq.h>
36426e95d1SSimon Horman #include <linux/mfd/tmio.h>
37426e95d1SSimon Horman #include <linux/mmc/card.h>
38426e95d1SSimon Horman #include <linux/mmc/host.h>
39426e95d1SSimon Horman #include <linux/mmc/mmc.h>
40426e95d1SSimon Horman #include <linux/mmc/slot-gpio.h>
41426e95d1SSimon Horman #include <linux/module.h>
42426e95d1SSimon Horman #include <linux/pagemap.h>
43426e95d1SSimon Horman #include <linux/platform_device.h>
44426e95d1SSimon Horman #include <linux/pm_qos.h>
45426e95d1SSimon Horman #include <linux/pm_runtime.h>
46426e95d1SSimon Horman #include <linux/regulator/consumer.h>
47426e95d1SSimon Horman #include <linux/mmc/sdio.h>
48426e95d1SSimon Horman #include <linux/scatterlist.h>
49426e95d1SSimon Horman #include <linux/spinlock.h>
50e90e8da7SYoshihiro Shimoda #include <linux/swiotlb.h>
51426e95d1SSimon Horman #include <linux/workqueue.h>
52426e95d1SSimon Horman 
53426e95d1SSimon Horman #include "tmio_mmc.h"
54426e95d1SSimon Horman 
55426e95d1SSimon Horman static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
56426e95d1SSimon Horman 				      struct mmc_data *data)
57426e95d1SSimon Horman {
58426e95d1SSimon Horman 	if (host->dma_ops)
59426e95d1SSimon Horman 		host->dma_ops->start(host, data);
60426e95d1SSimon Horman }
61426e95d1SSimon Horman 
62426e95d1SSimon Horman static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
63426e95d1SSimon Horman {
64426e95d1SSimon Horman 	if (host->dma_ops)
65426e95d1SSimon Horman 		host->dma_ops->enable(host, enable);
66426e95d1SSimon Horman }
67426e95d1SSimon Horman 
68426e95d1SSimon Horman static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
69426e95d1SSimon Horman 					struct tmio_mmc_data *pdata)
70426e95d1SSimon Horman {
71426e95d1SSimon Horman 	if (host->dma_ops) {
72426e95d1SSimon Horman 		host->dma_ops->request(host, pdata);
73426e95d1SSimon Horman 	} else {
74426e95d1SSimon Horman 		host->chan_tx = NULL;
75426e95d1SSimon Horman 		host->chan_rx = NULL;
76426e95d1SSimon Horman 	}
77426e95d1SSimon Horman }
78426e95d1SSimon Horman 
79426e95d1SSimon Horman static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
80426e95d1SSimon Horman {
81426e95d1SSimon Horman 	if (host->dma_ops)
82426e95d1SSimon Horman 		host->dma_ops->release(host);
83426e95d1SSimon Horman }
84426e95d1SSimon Horman 
85426e95d1SSimon Horman static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
86426e95d1SSimon Horman {
87426e95d1SSimon Horman 	if (host->dma_ops)
88426e95d1SSimon Horman 		host->dma_ops->abort(host);
89426e95d1SSimon Horman }
90426e95d1SSimon Horman 
9192d0f925SSimon Horman static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
9292d0f925SSimon Horman {
9392d0f925SSimon Horman 	if (host->dma_ops)
9492d0f925SSimon Horman 		host->dma_ops->dataend(host);
9592d0f925SSimon Horman }
9692d0f925SSimon Horman 
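/*
 * Illustrative sketch (not part of this file): the wrappers above only
 * dispatch through host->dma_ops, so a platform glue driver provides an
 * ops table with exactly the callbacks invoked here. The function names
 * below are hypothetical placeholders:
 *
 *	static const struct tmio_mmc_dma_ops example_dma_ops = {
 *		.start   = example_start_dma,
 *		.enable  = example_enable_dma,
 *		.request = example_request_dma,
 *		.release = example_release_dma,
 *		.abort   = example_abort_dma,
 *		.dataend = example_dataend_dma,
 *	};
 *
 * If no ops table is supplied, tmio_mmc_request_dma() leaves chan_tx and
 * chan_rx NULL and the core falls back to PIO.
 */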
97426e95d1SSimon Horman void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
98426e95d1SSimon Horman {
99426e95d1SSimon Horman 	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
100426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
101426e95d1SSimon Horman }
1026106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);
103426e95d1SSimon Horman 
104426e95d1SSimon Horman void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
105426e95d1SSimon Horman {
106426e95d1SSimon Horman 	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
107426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
108426e95d1SSimon Horman }
1096106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);
110426e95d1SSimon Horman 
111426e95d1SSimon Horman static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
112426e95d1SSimon Horman {
113426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
114426e95d1SSimon Horman }
115426e95d1SSimon Horman 
116426e95d1SSimon Horman static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
117426e95d1SSimon Horman {
118426e95d1SSimon Horman 	host->sg_len = data->sg_len;
119426e95d1SSimon Horman 	host->sg_ptr = data->sg;
120426e95d1SSimon Horman 	host->sg_orig = data->sg;
121426e95d1SSimon Horman 	host->sg_off = 0;
122426e95d1SSimon Horman }
123426e95d1SSimon Horman 
124426e95d1SSimon Horman static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
125426e95d1SSimon Horman {
126426e95d1SSimon Horman 	host->sg_ptr = sg_next(host->sg_ptr);
127426e95d1SSimon Horman 	host->sg_off = 0;
128426e95d1SSimon Horman 	return --host->sg_len;
129426e95d1SSimon Horman }
130426e95d1SSimon Horman 
131426e95d1SSimon Horman #define CMDREQ_TIMEOUT	5000
132426e95d1SSimon Horman 
133426e95d1SSimon Horman static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
134426e95d1SSimon Horman {
135426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
136426e95d1SSimon Horman 
137426e95d1SSimon Horman 	if (enable && !host->sdio_irq_enabled) {
138426e95d1SSimon Horman 		u16 sdio_status;
139426e95d1SSimon Horman 
140426e95d1SSimon Horman 		/* Keep device active while SDIO irq is enabled */
141426e95d1SSimon Horman 		pm_runtime_get_sync(mmc_dev(mmc));
142426e95d1SSimon Horman 
143426e95d1SSimon Horman 		host->sdio_irq_enabled = true;
144f2218db8SSimon Horman 		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;
145426e95d1SSimon Horman 
146426e95d1SSimon Horman 		/* Clear obsolete interrupts before enabling */
147426e95d1SSimon Horman 		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
148426e95d1SSimon Horman 		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
149426e95d1SSimon Horman 			sdio_status |= TMIO_SDIO_SETBITS_MASK;
150426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
151426e95d1SSimon Horman 
152426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
153426e95d1SSimon Horman 	} else if (!enable && host->sdio_irq_enabled) {
154426e95d1SSimon Horman 		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
155426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
156426e95d1SSimon Horman 
157426e95d1SSimon Horman 		host->sdio_irq_enabled = false;
158426e95d1SSimon Horman 		pm_runtime_mark_last_busy(mmc_dev(mmc));
159426e95d1SSimon Horman 		pm_runtime_put_autosuspend(mmc_dev(mmc));
160426e95d1SSimon Horman 	}
161426e95d1SSimon Horman }
162426e95d1SSimon Horman 
163426e95d1SSimon Horman static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
164426e95d1SSimon Horman {
165426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
166426e95d1SSimon Horman 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
16701ffb1aeSWolfram Sang 
16801ffb1aeSWolfram Sang 	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
16901ffb1aeSWolfram Sang 	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
1701f27ddf0SMasaharu Hayakawa 		usleep_range(10000, 11000);
171426e95d1SSimon Horman 
172426e95d1SSimon Horman 	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
173426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
1741f27ddf0SMasaharu Hayakawa 		usleep_range(10000, 11000);
175426e95d1SSimon Horman 	}
176426e95d1SSimon Horman }
177426e95d1SSimon Horman 
178426e95d1SSimon Horman static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
179426e95d1SSimon Horman {
180426e95d1SSimon Horman 	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
181426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
1821f27ddf0SMasaharu Hayakawa 		usleep_range(10000, 11000);
183426e95d1SSimon Horman 	}
184426e95d1SSimon Horman 
185426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
186426e95d1SSimon Horman 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
18701ffb1aeSWolfram Sang 
18801ffb1aeSWolfram Sang 	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
18901ffb1aeSWolfram Sang 	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
1901f27ddf0SMasaharu Hayakawa 		usleep_range(10000, 11000);
191426e95d1SSimon Horman }
192426e95d1SSimon Horman 
193426e95d1SSimon Horman static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
194426e95d1SSimon Horman 			       unsigned int new_clock)
195426e95d1SSimon Horman {
196426e95d1SSimon Horman 	u32 clk = 0, clock;
197426e95d1SSimon Horman 
198426e95d1SSimon Horman 	if (new_clock == 0) {
199426e95d1SSimon Horman 		tmio_mmc_clk_stop(host);
200426e95d1SSimon Horman 		return;
201426e95d1SSimon Horman 	}
202426e95d1SSimon Horman 
203426e95d1SSimon Horman 	if (host->clk_update)
204426e95d1SSimon Horman 		clock = host->clk_update(host, new_clock) / 512;
205426e95d1SSimon Horman 	else
206426e95d1SSimon Horman 		clock = host->mmc->f_min;
207426e95d1SSimon Horman 
208426e95d1SSimon Horman 	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
209426e95d1SSimon Horman 		clock <<= 1;
210426e95d1SSimon Horman 
211426e95d1SSimon Horman 	/* 1/1 clock is an option */
212426e95d1SSimon Horman 	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
213426e95d1SSimon Horman 		clk |= 0xff;
214426e95d1SSimon Horman 
215426e95d1SSimon Horman 	if (host->set_clk_div)
216426e95d1SSimon Horman 		host->set_clk_div(host->pdev, (clk >> 22) & 1);
217426e95d1SSimon Horman 
218426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
219426e95d1SSimon Horman 			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
220426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
221426e95d1SSimon Horman 	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
2221f27ddf0SMasaharu Hayakawa 		usleep_range(10000, 11000);
223426e95d1SSimon Horman 
224426e95d1SSimon Horman 	tmio_mmc_clk_start(host);
225426e95d1SSimon Horman }
226426e95d1SSimon Horman 
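/*
 * Worked example for the divider search in tmio_mmc_set_clock() above
 * (illustrative numbers only): if clk_update() reports a 100 MHz source,
 * the search starts at 100 MHz / 512 ~= 195 kHz with clk = 0x80000080.
 * A request for 25 MHz doubles the candidate clock seven times
 * (195 kHz -> 25 MHz) and shifts the divider byte 0x80 -> 0x01, i.e. a
 * divide-by-4 setting. The "1/1 clock" indicator (bit 22 of clk) only
 * ends up set after nine shifts, i.e. when the candidate reaches the
 * full source clock, which is what the TMIO_MMC_CLK_ACTUAL check tests.
 */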
227426e95d1SSimon Horman static void tmio_mmc_reset(struct tmio_mmc_host *host)
228426e95d1SSimon Horman {
229426e95d1SSimon Horman 	/* FIXME - should we set the stop clock reg here? */
230426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
231426e95d1SSimon Horman 	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
232426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
2331f27ddf0SMasaharu Hayakawa 	usleep_range(10000, 11000);
234426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
235426e95d1SSimon Horman 	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
236426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
2371f27ddf0SMasaharu Hayakawa 	usleep_range(10000, 11000);
23886beb538SWolfram Sang 
23986beb538SWolfram Sang 	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
24086beb538SWolfram Sang 		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
24186beb538SWolfram Sang 		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
24286beb538SWolfram Sang 	}
24386beb538SWolfram Sang 
244426e95d1SSimon Horman }
245426e95d1SSimon Horman 
246426e95d1SSimon Horman static void tmio_mmc_reset_work(struct work_struct *work)
247426e95d1SSimon Horman {
248426e95d1SSimon Horman 	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
249426e95d1SSimon Horman 						  delayed_reset_work.work);
250426e95d1SSimon Horman 	struct mmc_request *mrq;
251426e95d1SSimon Horman 	unsigned long flags;
252426e95d1SSimon Horman 
253426e95d1SSimon Horman 	spin_lock_irqsave(&host->lock, flags);
254426e95d1SSimon Horman 	mrq = host->mrq;
255426e95d1SSimon Horman 
256426e95d1SSimon Horman 	/*
257426e95d1SSimon Horman 	 * Is the request already finished? Since we use a non-blocking
258426e95d1SSimon Horman 	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
259426e95d1SSimon Horman 	 * us, so we have to check for IS_ERR(host->mrq)
260426e95d1SSimon Horman 	 */
261f2218db8SSimon Horman 	if (IS_ERR_OR_NULL(mrq) ||
262f2218db8SSimon Horman 	    time_is_after_jiffies(host->last_req_ts +
263426e95d1SSimon Horman 				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
264426e95d1SSimon Horman 		spin_unlock_irqrestore(&host->lock, flags);
265426e95d1SSimon Horman 		return;
266426e95d1SSimon Horman 	}
267426e95d1SSimon Horman 
268426e95d1SSimon Horman 	dev_warn(&host->pdev->dev,
269426e95d1SSimon Horman 		 "timeout waiting for hardware interrupt (CMD%u)\n",
270426e95d1SSimon Horman 		 mrq->cmd->opcode);
271426e95d1SSimon Horman 
272426e95d1SSimon Horman 	if (host->data)
273426e95d1SSimon Horman 		host->data->error = -ETIMEDOUT;
274426e95d1SSimon Horman 	else if (host->cmd)
275426e95d1SSimon Horman 		host->cmd->error = -ETIMEDOUT;
276426e95d1SSimon Horman 	else
277426e95d1SSimon Horman 		mrq->cmd->error = -ETIMEDOUT;
278426e95d1SSimon Horman 
279426e95d1SSimon Horman 	host->cmd = NULL;
280426e95d1SSimon Horman 	host->data = NULL;
281426e95d1SSimon Horman 	host->force_pio = false;
282426e95d1SSimon Horman 
283426e95d1SSimon Horman 	spin_unlock_irqrestore(&host->lock, flags);
284426e95d1SSimon Horman 
285426e95d1SSimon Horman 	tmio_mmc_reset(host);
286426e95d1SSimon Horman 
287426e95d1SSimon Horman 	/* Ready for new calls */
288426e95d1SSimon Horman 	host->mrq = NULL;
289426e95d1SSimon Horman 
290426e95d1SSimon Horman 	tmio_mmc_abort_dma(host);
291426e95d1SSimon Horman 	mmc_request_done(host->mmc, mrq);
292426e95d1SSimon Horman }
293426e95d1SSimon Horman 
294426e95d1SSimon Horman /* These are the bitmasks the tmio chip requires to implement the MMC response
295426e95d1SSimon Horman  * types. Note that R1 and R6 are the same in this scheme. */
296426e95d1SSimon Horman #define APP_CMD        0x0040
297426e95d1SSimon Horman #define RESP_NONE      0x0300
298426e95d1SSimon Horman #define RESP_R1        0x0400
299426e95d1SSimon Horman #define RESP_R1B       0x0500
300426e95d1SSimon Horman #define RESP_R2        0x0600
301426e95d1SSimon Horman #define RESP_R3        0x0700
302426e95d1SSimon Horman #define DATA_PRESENT   0x0800
303426e95d1SSimon Horman #define TRANSFER_READ  0x1000
304426e95d1SSimon Horman #define TRANSFER_MULTI 0x2000
305426e95d1SSimon Horman #define SECURITY_CMD   0x4000
306426e95d1SSimon Horman #define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
307426e95d1SSimon Horman 
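/*
 * Example of how these bits combine in tmio_mmc_start_command() below:
 * a multi-block read (CMD18, R1 response, with data) is issued as
 * 18 | RESP_R1 | DATA_PRESENT | TRANSFER_READ | TRANSFER_MULTI = 0x3c12,
 * written to CTL_SD_CMD after the argument register has been set up.
 */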
308f2218db8SSimon Horman static int tmio_mmc_start_command(struct tmio_mmc_host *host,
309f2218db8SSimon Horman 				  struct mmc_command *cmd)
310426e95d1SSimon Horman {
311426e95d1SSimon Horman 	struct mmc_data *data = host->data;
312426e95d1SSimon Horman 	int c = cmd->opcode;
313426e95d1SSimon Horman 	u32 irq_mask = TMIO_MASK_CMD;
314426e95d1SSimon Horman 
315426e95d1SSimon Horman 	switch (mmc_resp_type(cmd)) {
316426e95d1SSimon Horman 	case MMC_RSP_NONE: c |= RESP_NONE; break;
317426e95d1SSimon Horman 	case MMC_RSP_R1:
318426e95d1SSimon Horman 	case MMC_RSP_R1_NO_CRC:
319426e95d1SSimon Horman 			   c |= RESP_R1;   break;
320426e95d1SSimon Horman 	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
321426e95d1SSimon Horman 	case MMC_RSP_R2:   c |= RESP_R2;   break;
322426e95d1SSimon Horman 	case MMC_RSP_R3:   c |= RESP_R3;   break;
323426e95d1SSimon Horman 	default:
324426e95d1SSimon Horman 		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
325426e95d1SSimon Horman 		return -EINVAL;
326426e95d1SSimon Horman 	}
327426e95d1SSimon Horman 
328426e95d1SSimon Horman 	host->cmd = cmd;
329426e95d1SSimon Horman 
330426e95d1SSimon Horman /* FIXME - this seems to be OK commented out, but the spec suggests this bit
331426e95d1SSimon Horman  *         should be set when issuing app commands.
332426e95d1SSimon Horman  *	if(cmd->flags & MMC_FLAG_ACMD)
333426e95d1SSimon Horman  *		c |= APP_CMD;
334426e95d1SSimon Horman  */
335426e95d1SSimon Horman 	if (data) {
336426e95d1SSimon Horman 		c |= DATA_PRESENT;
337426e95d1SSimon Horman 		if (data->blocks > 1) {
338426e95d1SSimon Horman 			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
339426e95d1SSimon Horman 			c |= TRANSFER_MULTI;
340426e95d1SSimon Horman 
341426e95d1SSimon Horman 			/*
342f2218db8SSimon Horman 			 * Disable auto CMD12 at IO_RW_EXTENDED and
343f2218db8SSimon Horman 			 * SET_BLOCK_COUNT when doing a multiple block transfer
344426e95d1SSimon Horman 			 */
345426e95d1SSimon Horman 			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
3468b22c3c1SWolfram Sang 			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
347426e95d1SSimon Horman 				c |= NO_CMD12_ISSUE;
348426e95d1SSimon Horman 		}
349426e95d1SSimon Horman 		if (data->flags & MMC_DATA_READ)
350426e95d1SSimon Horman 			c |= TRANSFER_READ;
351426e95d1SSimon Horman 	}
352426e95d1SSimon Horman 
353426e95d1SSimon Horman 	tmio_mmc_enable_mmc_irqs(host, irq_mask);
354426e95d1SSimon Horman 
355426e95d1SSimon Horman 	/* Fire off the command */
356426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
357426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_CMD, c);
358426e95d1SSimon Horman 
359426e95d1SSimon Horman 	return 0;
360426e95d1SSimon Horman }
361426e95d1SSimon Horman 
362426e95d1SSimon Horman static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
363426e95d1SSimon Horman 				   unsigned short *buf,
364426e95d1SSimon Horman 				   unsigned int count)
365426e95d1SSimon Horman {
366426e95d1SSimon Horman 	int is_read = host->data->flags & MMC_DATA_READ;
367426e95d1SSimon Horman 	u8  *buf8;
368426e95d1SSimon Horman 
369426e95d1SSimon Horman 	/*
370426e95d1SSimon Horman 	 * Transfer the data
371426e95d1SSimon Horman 	 */
372426e95d1SSimon Horman 	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
3739c284c41SChris Brandt 		u32 data = 0;
3749c284c41SChris Brandt 		u32 *buf32 = (u32 *)buf;
375426e95d1SSimon Horman 
376426e95d1SSimon Horman 		if (is_read)
3779c284c41SChris Brandt 			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
378426e95d1SSimon Horman 					   count >> 2);
379426e95d1SSimon Horman 		else
3809c284c41SChris Brandt 			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
381426e95d1SSimon Horman 					    count >> 2);
382426e95d1SSimon Horman 
383426e95d1SSimon Horman 		/* if count was a multiple of 4 */
384426e95d1SSimon Horman 		if (!(count & 0x3))
385426e95d1SSimon Horman 			return;
386426e95d1SSimon Horman 
3879c284c41SChris Brandt 		buf32 += count >> 2;
388426e95d1SSimon Horman 		count %= 4;
389426e95d1SSimon Horman 
390426e95d1SSimon Horman 		if (is_read) {
3919c284c41SChris Brandt 			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
3929c284c41SChris Brandt 			memcpy(buf32, &data, count);
393426e95d1SSimon Horman 		} else {
3949c284c41SChris Brandt 			memcpy(&data, buf32, count);
3959c284c41SChris Brandt 			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
396426e95d1SSimon Horman 		}
397426e95d1SSimon Horman 
398426e95d1SSimon Horman 		return;
399426e95d1SSimon Horman 	}
400426e95d1SSimon Horman 
401426e95d1SSimon Horman 	if (is_read)
402426e95d1SSimon Horman 		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
403426e95d1SSimon Horman 	else
404426e95d1SSimon Horman 		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
405426e95d1SSimon Horman 
406426e95d1SSimon Horman 	/* if count was an even number */
407426e95d1SSimon Horman 	if (!(count & 0x1))
408426e95d1SSimon Horman 		return;
409426e95d1SSimon Horman 
410426e95d1SSimon Horman 	/* if count was an odd number */
411426e95d1SSimon Horman 	buf8 = (u8 *)(buf + (count >> 1));
412426e95d1SSimon Horman 
413426e95d1SSimon Horman 	/*
414426e95d1SSimon Horman 	 * FIXME
415426e95d1SSimon Horman 	 *
416426e95d1SSimon Horman 	 * the driver and this function are assuming
417426e95d1SSimon Horman 	 * little-endian usage
418426e95d1SSimon Horman 	 */
419426e95d1SSimon Horman 	if (is_read)
420426e95d1SSimon Horman 		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
421426e95d1SSimon Horman 	else
422426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
423426e95d1SSimon Horman }
424426e95d1SSimon Horman 
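/*
 * Tail handling example for tmio_mmc_transfer_data() above: with the
 * 32-bit data port and count = 10, two whole words go through the _rep
 * accessors (count >> 2) and the remaining two bytes are carried in the
 * zero-initialized 'data' word; the 16-bit path handles an odd trailing
 * byte the same way via buf8. As the FIXME notes, this byte packing
 * assumes little-endian usage.
 */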
425426e95d1SSimon Horman /*
426426e95d1SSimon Horman  * This chip always returns (at least?) as much data as you ask for.
427426e95d1SSimon Horman  * I'm unsure what happens if you ask for less than a block. This should be
428426e95d1SSimon Horman  * looked into to ensure that a funny length read doesn't hose the controller.
429426e95d1SSimon Horman  */
430426e95d1SSimon Horman static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
431426e95d1SSimon Horman {
432426e95d1SSimon Horman 	struct mmc_data *data = host->data;
433426e95d1SSimon Horman 	void *sg_virt;
434426e95d1SSimon Horman 	unsigned short *buf;
435426e95d1SSimon Horman 	unsigned int count;
436426e95d1SSimon Horman 	unsigned long flags;
437426e95d1SSimon Horman 
438426e95d1SSimon Horman 	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
439426e95d1SSimon Horman 		pr_err("PIO IRQ in DMA mode!\n");
440426e95d1SSimon Horman 		return;
441426e95d1SSimon Horman 	} else if (!data) {
442426e95d1SSimon Horman 		pr_debug("Spurious PIO IRQ\n");
443426e95d1SSimon Horman 		return;
444426e95d1SSimon Horman 	}
445426e95d1SSimon Horman 
446426e95d1SSimon Horman 	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
447426e95d1SSimon Horman 	buf = (unsigned short *)(sg_virt + host->sg_off);
448426e95d1SSimon Horman 
449426e95d1SSimon Horman 	count = host->sg_ptr->length - host->sg_off;
450426e95d1SSimon Horman 	if (count > data->blksz)
451426e95d1SSimon Horman 		count = data->blksz;
452426e95d1SSimon Horman 
453426e95d1SSimon Horman 	pr_debug("count: %08x offset: %08x flags %08x\n",
454426e95d1SSimon Horman 		 count, host->sg_off, data->flags);
455426e95d1SSimon Horman 
456426e95d1SSimon Horman 	/* Transfer the data */
457426e95d1SSimon Horman 	tmio_mmc_transfer_data(host, buf, count);
458426e95d1SSimon Horman 
459426e95d1SSimon Horman 	host->sg_off += count;
460426e95d1SSimon Horman 
461426e95d1SSimon Horman 	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
462426e95d1SSimon Horman 
463426e95d1SSimon Horman 	if (host->sg_off == host->sg_ptr->length)
464426e95d1SSimon Horman 		tmio_mmc_next_sg(host);
465426e95d1SSimon Horman }
466426e95d1SSimon Horman 
467426e95d1SSimon Horman static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
468426e95d1SSimon Horman {
469426e95d1SSimon Horman 	if (host->sg_ptr == &host->bounce_sg) {
470426e95d1SSimon Horman 		unsigned long flags;
471426e95d1SSimon Horman 		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
472f2218db8SSimon Horman 
473426e95d1SSimon Horman 		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
474426e95d1SSimon Horman 		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
475426e95d1SSimon Horman 	}
476426e95d1SSimon Horman }
477426e95d1SSimon Horman 
478426e95d1SSimon Horman /* needs to be called with host->lock held */
479426e95d1SSimon Horman void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
480426e95d1SSimon Horman {
481426e95d1SSimon Horman 	struct mmc_data *data = host->data;
482426e95d1SSimon Horman 	struct mmc_command *stop;
483426e95d1SSimon Horman 
484426e95d1SSimon Horman 	host->data = NULL;
485426e95d1SSimon Horman 
486426e95d1SSimon Horman 	if (!data) {
487426e95d1SSimon Horman 		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
488426e95d1SSimon Horman 		return;
489426e95d1SSimon Horman 	}
490426e95d1SSimon Horman 	stop = data->stop;
491426e95d1SSimon Horman 
492426e95d1SSimon Horman 	/* FIXME - return correct transfer count on errors */
493426e95d1SSimon Horman 	if (!data->error)
494426e95d1SSimon Horman 		data->bytes_xfered = data->blocks * data->blksz;
495426e95d1SSimon Horman 	else
496426e95d1SSimon Horman 		data->bytes_xfered = 0;
497426e95d1SSimon Horman 
498426e95d1SSimon Horman 	pr_debug("Completed data request\n");
499426e95d1SSimon Horman 
500426e95d1SSimon Horman 	/*
501426e95d1SSimon Horman 	 * FIXME: other drivers allow an optional stop command of any given type
502426e95d1SSimon Horman 	 *        which we don't do, as the chip can auto-generate them.
503426e95d1SSimon Horman 	 *        Perhaps we can be smarter about when to use auto CMD12 and
504426e95d1SSimon Horman 	 *        only issue the auto request when we know this is the desired
505426e95d1SSimon Horman 	 *        stop command, allowing fallback to the stop command the
506426e95d1SSimon Horman 	 *        upper layers expect. For now, we do what works.
507426e95d1SSimon Horman 	 */
508426e95d1SSimon Horman 
509426e95d1SSimon Horman 	if (data->flags & MMC_DATA_READ) {
510426e95d1SSimon Horman 		if (host->chan_rx && !host->force_pio)
511426e95d1SSimon Horman 			tmio_mmc_check_bounce_buffer(host);
512426e95d1SSimon Horman 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
513426e95d1SSimon Horman 			host->mrq);
514426e95d1SSimon Horman 	} else {
515426e95d1SSimon Horman 		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
516426e95d1SSimon Horman 			host->mrq);
517426e95d1SSimon Horman 	}
518426e95d1SSimon Horman 
5198b22c3c1SWolfram Sang 	if (stop && !host->mrq->sbc) {
520426e95d1SSimon Horman 		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
521426e95d1SSimon Horman 			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
522426e95d1SSimon Horman 				stop->opcode, stop->arg);
523426e95d1SSimon Horman 
524426e95d1SSimon Horman 		/* fill in response from auto CMD12 */
525426e95d1SSimon Horman 		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);
526426e95d1SSimon Horman 
527426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
528426e95d1SSimon Horman 	}
529426e95d1SSimon Horman 
530426e95d1SSimon Horman 	schedule_work(&host->done);
531426e95d1SSimon Horman }
5326106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);
533426e95d1SSimon Horman 
534426e95d1SSimon Horman static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
535426e95d1SSimon Horman {
536426e95d1SSimon Horman 	struct mmc_data *data;
537f2218db8SSimon Horman 
538426e95d1SSimon Horman 	spin_lock(&host->lock);
539426e95d1SSimon Horman 	data = host->data;
540426e95d1SSimon Horman 
541426e95d1SSimon Horman 	if (!data)
542426e95d1SSimon Horman 		goto out;
543426e95d1SSimon Horman 
544426e95d1SSimon Horman 	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
545426e95d1SSimon Horman 	    stat & TMIO_STAT_TXUNDERRUN)
546426e95d1SSimon Horman 		data->error = -EILSEQ;
547426e95d1SSimon Horman 	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
548426e95d1SSimon Horman 		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
549426e95d1SSimon Horman 		bool done = false;
550426e95d1SSimon Horman 
551426e95d1SSimon Horman 		/*
552426e95d1SSimon Horman 		 * Has all data been written out yet? Testing on SuperH showed
553426e95d1SSimon Horman 		 * that in most cases the first interrupt already comes with the
554426e95d1SSimon Horman 		 * BUSY status bit clear, but on some operations, like mount or
555426e95d1SSimon Horman 		 * at the beginning of a write / sync / umount, there is one
556426e95d1SSimon Horman 		 * DATAEND interrupt with the BUSY bit set. In these cases,
557426e95d1SSimon Horman 		 * waiting for one more interrupt fixes the problem.
558426e95d1SSimon Horman 		 */
559426e95d1SSimon Horman 		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
560426e95d1SSimon Horman 			if (status & TMIO_STAT_SCLKDIVEN)
561426e95d1SSimon Horman 				done = true;
562426e95d1SSimon Horman 		} else {
563426e95d1SSimon Horman 			if (!(status & TMIO_STAT_CMD_BUSY))
564426e95d1SSimon Horman 				done = true;
565426e95d1SSimon Horman 		}
566426e95d1SSimon Horman 
567426e95d1SSimon Horman 		if (done) {
568426e95d1SSimon Horman 			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
56992d0f925SSimon Horman 			tmio_mmc_dataend_dma(host);
570426e95d1SSimon Horman 		}
571426e95d1SSimon Horman 	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
572426e95d1SSimon Horman 		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
57392d0f925SSimon Horman 		tmio_mmc_dataend_dma(host);
574426e95d1SSimon Horman 	} else {
575426e95d1SSimon Horman 		tmio_mmc_do_data_irq(host);
576426e95d1SSimon Horman 		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
577426e95d1SSimon Horman 	}
578426e95d1SSimon Horman out:
579426e95d1SSimon Horman 	spin_unlock(&host->lock);
580426e95d1SSimon Horman }
581426e95d1SSimon Horman 
582f2218db8SSimon Horman static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
583426e95d1SSimon Horman {
584426e95d1SSimon Horman 	struct mmc_command *cmd = host->cmd;
585426e95d1SSimon Horman 	int i, addr;
586426e95d1SSimon Horman 
587426e95d1SSimon Horman 	spin_lock(&host->lock);
588426e95d1SSimon Horman 
589426e95d1SSimon Horman 	if (!host->cmd) {
590426e95d1SSimon Horman 		pr_debug("Spurious CMD irq\n");
591426e95d1SSimon Horman 		goto out;
592426e95d1SSimon Horman 	}
593426e95d1SSimon Horman 
594426e95d1SSimon Horman 	/* This controller is sicker than the PXA one. Not only do we need to
595426e95d1SSimon Horman 	 * drop the top 8 bits of the first response word, we also need to
596426e95d1SSimon Horman 	 * modify the order of the response for short response command types.
597426e95d1SSimon Horman 	 */
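	/*
	 * For a 136-bit (R2) response, for example, the loop below reads
	 * resp[3]..resp[0] from successive CTL_RESPONSE words, then each
	 * word is shifted left by 8 with the top byte of the following
	 * word pulled in (resp[3] is simply shifted), so resp[0..3] end
	 * up in the layout the MMC core expects. For R3, the single valid
	 * word lands in resp[3] and is copied to resp[0].
	 */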
598426e95d1SSimon Horman 
599426e95d1SSimon Horman 	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
600426e95d1SSimon Horman 		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);
601426e95d1SSimon Horman 
602426e95d1SSimon Horman 	if (cmd->flags &  MMC_RSP_136) {
603426e95d1SSimon Horman 		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
604426e95d1SSimon Horman 		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
605426e95d1SSimon Horman 		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
606426e95d1SSimon Horman 		cmd->resp[3] <<= 8;
607426e95d1SSimon Horman 	} else if (cmd->flags & MMC_RSP_R3) {
608426e95d1SSimon Horman 		cmd->resp[0] = cmd->resp[3];
609426e95d1SSimon Horman 	}
610426e95d1SSimon Horman 
611426e95d1SSimon Horman 	if (stat & TMIO_STAT_CMDTIMEOUT)
612426e95d1SSimon Horman 		cmd->error = -ETIMEDOUT;
613426e95d1SSimon Horman 	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
614426e95d1SSimon Horman 		 stat & TMIO_STAT_STOPBIT_ERR ||
615426e95d1SSimon Horman 		 stat & TMIO_STAT_CMD_IDX_ERR)
616426e95d1SSimon Horman 		cmd->error = -EILSEQ;
617426e95d1SSimon Horman 
618426e95d1SSimon Horman 	/* If there is data to handle we enable data IRQs here, and
619426e95d1SSimon Horman 	 * we will ultimately finish the request in the data_end handler.
620426e95d1SSimon Horman 	 * If there's no data or we encountered an error, finish now.
621426e95d1SSimon Horman 	 */
622426e95d1SSimon Horman 	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
623426e95d1SSimon Horman 		if (host->data->flags & MMC_DATA_READ) {
624b12a7a28SMasahiro Yamada 			if (host->force_pio || !host->chan_rx) {
625426e95d1SSimon Horman 				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
626426e95d1SSimon Horman 			} else {
627b12a7a28SMasahiro Yamada 				tmio_mmc_disable_mmc_irqs(host,
628b12a7a28SMasahiro Yamada 							  TMIO_MASK_READOP);
629426e95d1SSimon Horman 				tasklet_schedule(&host->dma_issue);
630426e95d1SSimon Horman 			}
631426e95d1SSimon Horman 		} else {
632b12a7a28SMasahiro Yamada 			if (host->force_pio || !host->chan_tx) {
633b12a7a28SMasahiro Yamada 				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
634b12a7a28SMasahiro Yamada 			} else {
635b12a7a28SMasahiro Yamada 				tmio_mmc_disable_mmc_irqs(host,
636b12a7a28SMasahiro Yamada 							  TMIO_MASK_WRITEOP);
637b12a7a28SMasahiro Yamada 				tasklet_schedule(&host->dma_issue);
638b12a7a28SMasahiro Yamada 			}
639b12a7a28SMasahiro Yamada 		}
640b12a7a28SMasahiro Yamada 	} else {
641426e95d1SSimon Horman 		schedule_work(&host->done);
642426e95d1SSimon Horman 	}
643426e95d1SSimon Horman 
644426e95d1SSimon Horman out:
645426e95d1SSimon Horman 	spin_unlock(&host->lock);
646426e95d1SSimon Horman }
647426e95d1SSimon Horman 
648426e95d1SSimon Horman static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
649426e95d1SSimon Horman 				       int ireg, int status)
650426e95d1SSimon Horman {
651426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
652426e95d1SSimon Horman 
653426e95d1SSimon Horman 	/* Card insert / remove attempts */
654426e95d1SSimon Horman 	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
655426e95d1SSimon Horman 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
656426e95d1SSimon Horman 			TMIO_STAT_CARD_REMOVE);
657426e95d1SSimon Horman 		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
658426e95d1SSimon Horman 		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
659426e95d1SSimon Horman 		    !work_pending(&mmc->detect.work))
660426e95d1SSimon Horman 			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
661426e95d1SSimon Horman 		return true;
662426e95d1SSimon Horman 	}
663426e95d1SSimon Horman 
664426e95d1SSimon Horman 	return false;
665426e95d1SSimon Horman }
666426e95d1SSimon Horman 
667f2218db8SSimon Horman static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
668f2218db8SSimon Horman 				  int status)
669426e95d1SSimon Horman {
670426e95d1SSimon Horman 	/* Command completion */
671426e95d1SSimon Horman 	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
672f2218db8SSimon Horman 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
673426e95d1SSimon Horman 				      TMIO_STAT_CMDTIMEOUT);
674426e95d1SSimon Horman 		tmio_mmc_cmd_irq(host, status);
675426e95d1SSimon Horman 		return true;
676426e95d1SSimon Horman 	}
677426e95d1SSimon Horman 
678426e95d1SSimon Horman 	/* Data transfer */
679426e95d1SSimon Horman 	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
680426e95d1SSimon Horman 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
681426e95d1SSimon Horman 		tmio_mmc_pio_irq(host);
682426e95d1SSimon Horman 		return true;
683426e95d1SSimon Horman 	}
684426e95d1SSimon Horman 
685426e95d1SSimon Horman 	/* Data transfer completion */
686426e95d1SSimon Horman 	if (ireg & TMIO_STAT_DATAEND) {
687426e95d1SSimon Horman 		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
688426e95d1SSimon Horman 		tmio_mmc_data_irq(host, status);
689426e95d1SSimon Horman 		return true;
690426e95d1SSimon Horman 	}
691426e95d1SSimon Horman 
692426e95d1SSimon Horman 	return false;
693426e95d1SSimon Horman }
694426e95d1SSimon Horman 
695426e95d1SSimon Horman static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
696426e95d1SSimon Horman {
697426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
698426e95d1SSimon Horman 	struct tmio_mmc_data *pdata = host->pdata;
699426e95d1SSimon Horman 	unsigned int ireg, status;
700426e95d1SSimon Horman 	unsigned int sdio_status;
701426e95d1SSimon Horman 
702426e95d1SSimon Horman 	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
703426e95d1SSimon Horman 		return;
704426e95d1SSimon Horman 
705426e95d1SSimon Horman 	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
706426e95d1SSimon Horman 	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
707426e95d1SSimon Horman 
708426e95d1SSimon Horman 	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
709426e95d1SSimon Horman 	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
710426e95d1SSimon Horman 		sdio_status |= TMIO_SDIO_SETBITS_MASK;
711426e95d1SSimon Horman 
712426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);
713426e95d1SSimon Horman 
714426e95d1SSimon Horman 	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
715426e95d1SSimon Horman 		mmc_signal_sdio_irq(mmc);
716426e95d1SSimon Horman }
717426e95d1SSimon Horman 
718426e95d1SSimon Horman irqreturn_t tmio_mmc_irq(int irq, void *devid)
719426e95d1SSimon Horman {
720426e95d1SSimon Horman 	struct tmio_mmc_host *host = devid;
721426e95d1SSimon Horman 	unsigned int ireg, status;
722426e95d1SSimon Horman 
723426e95d1SSimon Horman 	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
724426e95d1SSimon Horman 	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
725426e95d1SSimon Horman 
726426e95d1SSimon Horman 	/* Clear the status except the interrupt status */
727426e95d1SSimon Horman 	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
728426e95d1SSimon Horman 
729426e95d1SSimon Horman 	if (__tmio_mmc_card_detect_irq(host, ireg, status))
730426e95d1SSimon Horman 		return IRQ_HANDLED;
731426e95d1SSimon Horman 	if (__tmio_mmc_sdcard_irq(host, ireg, status))
732426e95d1SSimon Horman 		return IRQ_HANDLED;
733426e95d1SSimon Horman 
734426e95d1SSimon Horman 	__tmio_mmc_sdio_irq(host);
735426e95d1SSimon Horman 
736426e95d1SSimon Horman 	return IRQ_HANDLED;
737426e95d1SSimon Horman }
7386106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_irq);
739426e95d1SSimon Horman 
740426e95d1SSimon Horman static int tmio_mmc_start_data(struct tmio_mmc_host *host,
741426e95d1SSimon Horman 			       struct mmc_data *data)
742426e95d1SSimon Horman {
743426e95d1SSimon Horman 	struct tmio_mmc_data *pdata = host->pdata;
744426e95d1SSimon Horman 
745426e95d1SSimon Horman 	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n",
746426e95d1SSimon Horman 		 data->blksz, data->blocks);
747426e95d1SSimon Horman 
748426e95d1SSimon Horman 	/* Some hardware cannot perform 2-byte requests in 4/8-bit mode */
749426e95d1SSimon Horman 	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
750426e95d1SSimon Horman 	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
751426e95d1SSimon Horman 		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
752426e95d1SSimon Horman 
753426e95d1SSimon Horman 		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
754426e95d1SSimon Horman 			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
755426e95d1SSimon Horman 			       mmc_hostname(host->mmc), data->blksz);
756426e95d1SSimon Horman 			return -EINVAL;
757426e95d1SSimon Horman 		}
758426e95d1SSimon Horman 	}
759426e95d1SSimon Horman 
760426e95d1SSimon Horman 	tmio_mmc_init_sg(host, data);
761426e95d1SSimon Horman 	host->data = data;
762426e95d1SSimon Horman 
763426e95d1SSimon Horman 	/* Set transfer length / blocksize */
764426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
765426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
766426e95d1SSimon Horman 
767426e95d1SSimon Horman 	tmio_mmc_start_dma(host, data);
768426e95d1SSimon Horman 
769426e95d1SSimon Horman 	return 0;
770426e95d1SSimon Horman }
771426e95d1SSimon Horman 
772426e95d1SSimon Horman static void tmio_mmc_hw_reset(struct mmc_host *mmc)
773426e95d1SSimon Horman {
774426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
775426e95d1SSimon Horman 
776426e95d1SSimon Horman 	if (host->hw_reset)
777426e95d1SSimon Horman 		host->hw_reset(host);
778426e95d1SSimon Horman }
779426e95d1SSimon Horman 
780426e95d1SSimon Horman static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
781426e95d1SSimon Horman {
782426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
783426e95d1SSimon Horman 	int i, ret = 0;
784426e95d1SSimon Horman 
785426e95d1SSimon Horman 	if (!host->init_tuning || !host->select_tuning)
786426e95d1SSimon Horman 		/* Tuning is not supported */
787426e95d1SSimon Horman 		goto out;
788426e95d1SSimon Horman 
789426e95d1SSimon Horman 	host->tap_num = host->init_tuning(host);
790426e95d1SSimon Horman 	if (!host->tap_num)
791426e95d1SSimon Horman 		/* Tuning is not supported */
792426e95d1SSimon Horman 		goto out;
793426e95d1SSimon Horman 
794426e95d1SSimon Horman 	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
795426e95d1SSimon Horman 		dev_warn_once(&host->pdev->dev,
796426e95d1SSimon Horman 			"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
797426e95d1SSimon Horman 		goto out;
798426e95d1SSimon Horman 	}
799426e95d1SSimon Horman 
800426e95d1SSimon Horman 	bitmap_zero(host->taps, host->tap_num * 2);
801426e95d1SSimon Horman 
802426e95d1SSimon Horman 	/* Issue CMD19 twice for each tap */
803426e95d1SSimon Horman 	for (i = 0; i < 2 * host->tap_num; i++) {
804426e95d1SSimon Horman 		if (host->prepare_tuning)
805426e95d1SSimon Horman 			host->prepare_tuning(host, i % host->tap_num);
806426e95d1SSimon Horman 
807426e95d1SSimon Horman 		ret = mmc_send_tuning(mmc, opcode, NULL);
808426e95d1SSimon Horman 		if (ret && ret != -EILSEQ)
809426e95d1SSimon Horman 			goto out;
810426e95d1SSimon Horman 		if (ret == 0)
811426e95d1SSimon Horman 			set_bit(i, host->taps);
812426e95d1SSimon Horman 
813754febccSWolfram Sang 		usleep_range(1000, 1200);
814426e95d1SSimon Horman 	}
815426e95d1SSimon Horman 
816426e95d1SSimon Horman 	ret = host->select_tuning(host);
817426e95d1SSimon Horman 
818426e95d1SSimon Horman out:
819426e95d1SSimon Horman 	if (ret < 0) {
820426e95d1SSimon Horman 		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
821426e95d1SSimon Horman 		tmio_mmc_hw_reset(mmc);
822426e95d1SSimon Horman 	}
823426e95d1SSimon Horman 
824426e95d1SSimon Horman 	return ret;
825426e95d1SSimon Horman }
826426e95d1SSimon Horman 
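/*
 * Summary of the tuning flow above: the tuning command is sent twice per
 * tap (2 * tap_num iterations), each successful transfer sets the
 * corresponding bit in the host->taps bitmap, any error other than
 * -EILSEQ aborts the procedure, and the platform's select_tuning()
 * callback makes the final tap choice from the bitmap. On failure the
 * host is reset via tmio_mmc_hw_reset().
 */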
827f2218db8SSimon Horman static void tmio_process_mrq(struct tmio_mmc_host *host,
828f2218db8SSimon Horman 			     struct mmc_request *mrq)
829426e95d1SSimon Horman {
8308b22c3c1SWolfram Sang 	struct mmc_command *cmd;
831426e95d1SSimon Horman 	int ret;
832426e95d1SSimon Horman 
8338b22c3c1SWolfram Sang 	if (mrq->sbc && host->cmd != mrq->sbc) {
8348b22c3c1SWolfram Sang 		cmd = mrq->sbc;
8358b22c3c1SWolfram Sang 	} else {
8368b22c3c1SWolfram Sang 		cmd = mrq->cmd;
837426e95d1SSimon Horman 		if (mrq->data) {
838426e95d1SSimon Horman 			ret = tmio_mmc_start_data(host, mrq->data);
839426e95d1SSimon Horman 			if (ret)
840426e95d1SSimon Horman 				goto fail;
841426e95d1SSimon Horman 		}
8428b22c3c1SWolfram Sang 	}
843426e95d1SSimon Horman 
8448b22c3c1SWolfram Sang 	ret = tmio_mmc_start_command(host, cmd);
84510c998efSWolfram Sang 	if (ret)
84610c998efSWolfram Sang 		goto fail;
84710c998efSWolfram Sang 
848426e95d1SSimon Horman 	schedule_delayed_work(&host->delayed_reset_work,
849426e95d1SSimon Horman 			      msecs_to_jiffies(CMDREQ_TIMEOUT));
850426e95d1SSimon Horman 	return;
851426e95d1SSimon Horman 
852426e95d1SSimon Horman fail:
853426e95d1SSimon Horman 	host->force_pio = false;
854426e95d1SSimon Horman 	host->mrq = NULL;
855426e95d1SSimon Horman 	mrq->cmd->error = ret;
856de2a6bb9SWolfram Sang 	mmc_request_done(host->mmc, mrq);
857de2a6bb9SWolfram Sang }
858de2a6bb9SWolfram Sang 
859de2a6bb9SWolfram Sang /* Process requests from the MMC layer */
860de2a6bb9SWolfram Sang static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
861de2a6bb9SWolfram Sang {
862de2a6bb9SWolfram Sang 	struct tmio_mmc_host *host = mmc_priv(mmc);
863de2a6bb9SWolfram Sang 	unsigned long flags;
864de2a6bb9SWolfram Sang 
865de2a6bb9SWolfram Sang 	spin_lock_irqsave(&host->lock, flags);
866de2a6bb9SWolfram Sang 
867de2a6bb9SWolfram Sang 	if (host->mrq) {
868de2a6bb9SWolfram Sang 		pr_debug("request not null\n");
869de2a6bb9SWolfram Sang 		if (IS_ERR(host->mrq)) {
870de2a6bb9SWolfram Sang 			spin_unlock_irqrestore(&host->lock, flags);
871de2a6bb9SWolfram Sang 			mrq->cmd->error = -EAGAIN;
872426e95d1SSimon Horman 			mmc_request_done(mmc, mrq);
873de2a6bb9SWolfram Sang 			return;
874de2a6bb9SWolfram Sang 		}
875de2a6bb9SWolfram Sang 	}
876de2a6bb9SWolfram Sang 
877de2a6bb9SWolfram Sang 	host->last_req_ts = jiffies;
878de2a6bb9SWolfram Sang 	wmb();
879de2a6bb9SWolfram Sang 	host->mrq = mrq;
880de2a6bb9SWolfram Sang 
881de2a6bb9SWolfram Sang 	spin_unlock_irqrestore(&host->lock, flags);
882de2a6bb9SWolfram Sang 
883de2a6bb9SWolfram Sang 	tmio_process_mrq(host, mrq);
884426e95d1SSimon Horman }
885426e95d1SSimon Horman 
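/*
 * Note on the host->mrq sentinel checked above: tmio_mmc_set_ios() sets
 * host->mrq to an ERR_PTR() value (-EBUSY, or -EINTR when it detects a
 * concurrent caller) while it reconfigures clock and power, so a request
 * arriving in that window is failed with -EAGAIN instead of being
 * started mid-reconfiguration.
 */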
886f5fdcd1dSWolfram Sang static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
887f5fdcd1dSWolfram Sang {
888f5fdcd1dSWolfram Sang 	struct mmc_request *mrq;
889f5fdcd1dSWolfram Sang 	unsigned long flags;
890f5fdcd1dSWolfram Sang 
891f5fdcd1dSWolfram Sang 	spin_lock_irqsave(&host->lock, flags);
892f5fdcd1dSWolfram Sang 
893f5fdcd1dSWolfram Sang 	mrq = host->mrq;
894f5fdcd1dSWolfram Sang 	if (IS_ERR_OR_NULL(mrq)) {
895f5fdcd1dSWolfram Sang 		spin_unlock_irqrestore(&host->lock, flags);
896f5fdcd1dSWolfram Sang 		return;
897f5fdcd1dSWolfram Sang 	}
898f5fdcd1dSWolfram Sang 
8998b22c3c1SWolfram Sang 	/* If not SET_BLOCK_COUNT, clear old data */
9008b22c3c1SWolfram Sang 	if (host->cmd != mrq->sbc) {
901f5fdcd1dSWolfram Sang 		host->cmd = NULL;
902f5fdcd1dSWolfram Sang 		host->data = NULL;
903f5fdcd1dSWolfram Sang 		host->force_pio = false;
9048b22c3c1SWolfram Sang 		host->mrq = NULL;
9058b22c3c1SWolfram Sang 	}
906f5fdcd1dSWolfram Sang 
907f5fdcd1dSWolfram Sang 	cancel_delayed_work(&host->delayed_reset_work);
908f5fdcd1dSWolfram Sang 
909f5fdcd1dSWolfram Sang 	spin_unlock_irqrestore(&host->lock, flags);
910f5fdcd1dSWolfram Sang 
911f5fdcd1dSWolfram Sang 	if (mrq->cmd->error || (mrq->data && mrq->data->error))
912f5fdcd1dSWolfram Sang 		tmio_mmc_abort_dma(host);
913f5fdcd1dSWolfram Sang 
914f5fdcd1dSWolfram Sang 	if (host->check_scc_error)
915f5fdcd1dSWolfram Sang 		host->check_scc_error(host);
916f5fdcd1dSWolfram Sang 
9178b22c3c1SWolfram Sang 	/* If SET_BLOCK_COUNT, continue with main command */
9188b22c3c1SWolfram Sang 	if (host->mrq) {
9198b22c3c1SWolfram Sang 		tmio_process_mrq(host, mrq);
9208b22c3c1SWolfram Sang 		return;
9218b22c3c1SWolfram Sang 	}
9228b22c3c1SWolfram Sang 
923f5fdcd1dSWolfram Sang 	mmc_request_done(host->mmc, mrq);
924f5fdcd1dSWolfram Sang }
925f5fdcd1dSWolfram Sang 
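/*
 * Requests with a SET_BLOCK_COUNT pre-command (mrq->sbc) complete in two
 * passes: tmio_process_mrq() issues the sbc on its own first; when that
 * command finishes, tmio_mmc_finish_request() sees host->cmd == mrq->sbc,
 * keeps host->mrq and re-enters tmio_process_mrq() to start the actual
 * data command (e.g. CMD23 followed by CMD25 for a multi-block write).
 */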
926f5fdcd1dSWolfram Sang static void tmio_mmc_done_work(struct work_struct *work)
927f5fdcd1dSWolfram Sang {
928f5fdcd1dSWolfram Sang 	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
929f5fdcd1dSWolfram Sang 						  done);
930f5fdcd1dSWolfram Sang 	tmio_mmc_finish_request(host);
931f5fdcd1dSWolfram Sang }
932f5fdcd1dSWolfram Sang 
933426e95d1SSimon Horman static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
934426e95d1SSimon Horman {
935426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
936426e95d1SSimon Horman 	int ret = 0;
937426e95d1SSimon Horman 
938426e95d1SSimon Horman 	/* .set_ios() returns void, so there is no chance to report an error */
939426e95d1SSimon Horman 
940426e95d1SSimon Horman 	if (host->set_pwr)
941426e95d1SSimon Horman 		host->set_pwr(host->pdev, 1);
942426e95d1SSimon Horman 
943426e95d1SSimon Horman 	if (!IS_ERR(mmc->supply.vmmc)) {
944426e95d1SSimon Horman 		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
945426e95d1SSimon Horman 		/*
946426e95d1SSimon Horman 		 * Attention: empirical value. With a b43 WiFi SDIO card this
947426e95d1SSimon Horman 		 * delay proved necessary for reliable card-insertion probing.
948426e95d1SSimon Horman 		 * 100us were not enough. Is this the same 140us delay as in
949426e95d1SSimon Horman 		 * tmio_mmc_set_ios()?
950426e95d1SSimon Horman 		 */
951754febccSWolfram Sang 		usleep_range(200, 300);
952426e95d1SSimon Horman 	}
953426e95d1SSimon Horman 	/*
954426e95d1SSimon Horman 	 * It seems VccQ should be switched on after Vcc; this is also what the
955426e95d1SSimon Horman 	 * omap_hsmmc.c driver does.
956426e95d1SSimon Horman 	 */
957426e95d1SSimon Horman 	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
958426e95d1SSimon Horman 		ret = regulator_enable(mmc->supply.vqmmc);
959754febccSWolfram Sang 		usleep_range(200, 300);
960426e95d1SSimon Horman 	}
961426e95d1SSimon Horman 
962426e95d1SSimon Horman 	if (ret < 0)
963426e95d1SSimon Horman 		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
964426e95d1SSimon Horman 			ret);
965426e95d1SSimon Horman }
966426e95d1SSimon Horman 
967426e95d1SSimon Horman static void tmio_mmc_power_off(struct tmio_mmc_host *host)
968426e95d1SSimon Horman {
969426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
970426e95d1SSimon Horman 
971426e95d1SSimon Horman 	if (!IS_ERR(mmc->supply.vqmmc))
972426e95d1SSimon Horman 		regulator_disable(mmc->supply.vqmmc);
973426e95d1SSimon Horman 
974426e95d1SSimon Horman 	if (!IS_ERR(mmc->supply.vmmc))
975426e95d1SSimon Horman 		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
976426e95d1SSimon Horman 
977426e95d1SSimon Horman 	if (host->set_pwr)
978426e95d1SSimon Horman 		host->set_pwr(host->pdev, 0);
979426e95d1SSimon Horman }
980426e95d1SSimon Horman 
981426e95d1SSimon Horman static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
982426e95d1SSimon Horman 				   unsigned char bus_width)
983426e95d1SSimon Horman {
984426e95d1SSimon Horman 	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
985426e95d1SSimon Horman 				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);
986426e95d1SSimon Horman 
987426e95d1SSimon Horman 	/* reg now applies to MMC_BUS_WIDTH_4 */
988426e95d1SSimon Horman 	if (bus_width == MMC_BUS_WIDTH_1)
989426e95d1SSimon Horman 		reg |= CARD_OPT_WIDTH;
990426e95d1SSimon Horman 	else if (bus_width == MMC_BUS_WIDTH_8)
991426e95d1SSimon Horman 		reg |= CARD_OPT_WIDTH8;
992426e95d1SSimon Horman 
993426e95d1SSimon Horman 	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
994426e95d1SSimon Horman }
995426e95d1SSimon Horman 
996426e95d1SSimon Horman /* Set MMC clock / power.
997426e95d1SSimon Horman  * Note: This controller uses a simple divider scheme, therefore it cannot
998426e95d1SSimon Horman  * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
999426e95d1SSimon Horman  * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
1000426e95d1SSimon Horman  * slowest setting.
1001426e95d1SSimon Horman  */
1002426e95d1SSimon Horman static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1003426e95d1SSimon Horman {
1004426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
1005426e95d1SSimon Horman 	struct device *dev = &host->pdev->dev;
1006426e95d1SSimon Horman 	unsigned long flags;
1007426e95d1SSimon Horman 
1008426e95d1SSimon Horman 	mutex_lock(&host->ios_lock);
1009426e95d1SSimon Horman 
1010426e95d1SSimon Horman 	spin_lock_irqsave(&host->lock, flags);
1011426e95d1SSimon Horman 	if (host->mrq) {
1012426e95d1SSimon Horman 		if (IS_ERR(host->mrq)) {
1013426e95d1SSimon Horman 			dev_dbg(dev,
1014426e95d1SSimon Horman 				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
1015426e95d1SSimon Horman 				current->comm, task_pid_nr(current),
1016426e95d1SSimon Horman 				ios->clock, ios->power_mode);
1017426e95d1SSimon Horman 			host->mrq = ERR_PTR(-EINTR);
1018426e95d1SSimon Horman 		} else {
1019426e95d1SSimon Horman 			dev_dbg(dev,
1020426e95d1SSimon Horman 				"%s.%d: CMD%u active since %lu, now %lu!\n",
1021426e95d1SSimon Horman 				current->comm, task_pid_nr(current),
1022f2218db8SSimon Horman 				host->mrq->cmd->opcode, host->last_req_ts,
1023f2218db8SSimon Horman 				jiffies);
1024426e95d1SSimon Horman 		}
1025426e95d1SSimon Horman 		spin_unlock_irqrestore(&host->lock, flags);
1026426e95d1SSimon Horman 
1027426e95d1SSimon Horman 		mutex_unlock(&host->ios_lock);
1028426e95d1SSimon Horman 		return;
1029426e95d1SSimon Horman 	}
1030426e95d1SSimon Horman 
1031426e95d1SSimon Horman 	host->mrq = ERR_PTR(-EBUSY);
1032426e95d1SSimon Horman 
1033426e95d1SSimon Horman 	spin_unlock_irqrestore(&host->lock, flags);
1034426e95d1SSimon Horman 
1035426e95d1SSimon Horman 	switch (ios->power_mode) {
1036426e95d1SSimon Horman 	case MMC_POWER_OFF:
1037426e95d1SSimon Horman 		tmio_mmc_power_off(host);
1038426e95d1SSimon Horman 		tmio_mmc_clk_stop(host);
1039426e95d1SSimon Horman 		break;
1040426e95d1SSimon Horman 	case MMC_POWER_UP:
1041426e95d1SSimon Horman 		tmio_mmc_power_on(host, ios->vdd);
1042426e95d1SSimon Horman 		tmio_mmc_set_clock(host, ios->clock);
1043426e95d1SSimon Horman 		tmio_mmc_set_bus_width(host, ios->bus_width);
1044426e95d1SSimon Horman 		break;
1045426e95d1SSimon Horman 	case MMC_POWER_ON:
1046426e95d1SSimon Horman 		tmio_mmc_set_clock(host, ios->clock);
1047426e95d1SSimon Horman 		tmio_mmc_set_bus_width(host, ios->bus_width);
1048426e95d1SSimon Horman 		break;
1049426e95d1SSimon Horman 	}
1050426e95d1SSimon Horman 
1051426e95d1SSimon Horman 	/* Let things settle. Delay taken from the WinCE driver */
1052754febccSWolfram Sang 	usleep_range(140, 200);
1053426e95d1SSimon Horman 	if (PTR_ERR(host->mrq) == -EINTR)
1054426e95d1SSimon Horman 		dev_dbg(&host->pdev->dev,
1055426e95d1SSimon Horman 			"%s.%d: IOS interrupted: clk %u, mode %u",
1056426e95d1SSimon Horman 			current->comm, task_pid_nr(current),
1057426e95d1SSimon Horman 			ios->clock, ios->power_mode);
1058426e95d1SSimon Horman 	host->mrq = NULL;
1059426e95d1SSimon Horman 
1060426e95d1SSimon Horman 	host->clk_cache = ios->clock;
1061426e95d1SSimon Horman 
1062426e95d1SSimon Horman 	mutex_unlock(&host->ios_lock);
1063426e95d1SSimon Horman }
1064426e95d1SSimon Horman 
1065426e95d1SSimon Horman static int tmio_mmc_get_ro(struct mmc_host *mmc)
1066426e95d1SSimon Horman {
1067426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(mmc);
1068f2218db8SSimon Horman 
1069218f6024SMasahiro Yamada 	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
1070218f6024SMasahiro Yamada 		 TMIO_STAT_WRPROTECT);
1071426e95d1SSimon Horman }
1072426e95d1SSimon Horman 
1073497d1f96SMasahiro Yamada static int tmio_mmc_get_cd(struct mmc_host *mmc)
1074497d1f96SMasahiro Yamada {
1075497d1f96SMasahiro Yamada 	struct tmio_mmc_host *host = mmc_priv(mmc);
1076497d1f96SMasahiro Yamada 
1077497d1f96SMasahiro Yamada 	return !!(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
1078497d1f96SMasahiro Yamada 		  TMIO_STAT_SIGSTATE);
1079497d1f96SMasahiro Yamada }
1080497d1f96SMasahiro Yamada 
1081426e95d1SSimon Horman static int tmio_multi_io_quirk(struct mmc_card *card,
1082426e95d1SSimon Horman 			       unsigned int direction, int blk_size)
1083426e95d1SSimon Horman {
1084426e95d1SSimon Horman 	struct tmio_mmc_host *host = mmc_priv(card->host);
1085426e95d1SSimon Horman 
1086426e95d1SSimon Horman 	if (host->multi_io_quirk)
1087426e95d1SSimon Horman 		return host->multi_io_quirk(card, direction, blk_size);
1088426e95d1SSimon Horman 
1089426e95d1SSimon Horman 	return blk_size;
1090426e95d1SSimon Horman }
1091426e95d1SSimon Horman 
1092c055fc75SMasahiro Yamada static const struct mmc_host_ops tmio_mmc_ops = {
1093426e95d1SSimon Horman 	.request	= tmio_mmc_request,
1094426e95d1SSimon Horman 	.set_ios	= tmio_mmc_set_ios,
1095426e95d1SSimon Horman 	.get_ro         = tmio_mmc_get_ro,
1096497d1f96SMasahiro Yamada 	.get_cd		= tmio_mmc_get_cd,
1097426e95d1SSimon Horman 	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
1098426e95d1SSimon Horman 	.multi_io_quirk	= tmio_multi_io_quirk,
1099426e95d1SSimon Horman 	.hw_reset	= tmio_mmc_hw_reset,
1100426e95d1SSimon Horman 	.execute_tuning = tmio_mmc_execute_tuning,
1101426e95d1SSimon Horman };
1102426e95d1SSimon Horman 
1103426e95d1SSimon Horman static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
1104426e95d1SSimon Horman {
1105426e95d1SSimon Horman 	struct tmio_mmc_data *pdata = host->pdata;
1106426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
1107a3d95d1dSFabrizio Castro 	int err;
1108426e95d1SSimon Horman 
1109a3d95d1dSFabrizio Castro 	err = mmc_regulator_get_supply(mmc);
1110a3d95d1dSFabrizio Castro 	if (err)
1111a3d95d1dSFabrizio Castro 		return err;
1112426e95d1SSimon Horman 
1113426e95d1SSimon Horman 	/* use ocr_mask if no regulator */
1114426e95d1SSimon Horman 	if (!mmc->ocr_avail)
1115426e95d1SSimon Horman 		mmc->ocr_avail = pdata->ocr_mask;
1116426e95d1SSimon Horman 
1117426e95d1SSimon Horman 	/*
1118426e95d1SSimon Horman 	 * Try again; there is a possibility that the regulator
1119426e95d1SSimon Horman 	 * has not been probed yet.
1120426e95d1SSimon Horman 	 */
1121426e95d1SSimon Horman 	if (!mmc->ocr_avail)
1122426e95d1SSimon Horman 		return -EPROBE_DEFER;
1123426e95d1SSimon Horman 
1124426e95d1SSimon Horman 	return 0;
1125426e95d1SSimon Horman }
1126426e95d1SSimon Horman 
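/* Parse TMIO-specific (legacy) DT properties not handled by mmc_of_parse() */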
1127426e95d1SSimon Horman static void tmio_mmc_of_parse(struct platform_device *pdev,
11287c53b797SMasahiro Yamada 			      struct mmc_host *mmc)
1129426e95d1SSimon Horman {
1130426e95d1SSimon Horman 	const struct device_node *np = pdev->dev.of_node;
1131f2218db8SSimon Horman 
1132426e95d1SSimon Horman 	if (!np)
1133426e95d1SSimon Horman 		return;
1134426e95d1SSimon Horman 
1135788778b0SMasahiro Yamada 	/*
1136788778b0SMasahiro Yamada 	 * DEPRECATED:
1137788778b0SMasahiro Yamada 	 * For new platforms, please use "disable-wp" instead of
1138788778b0SMasahiro Yamada 	 * "toshiba,mmc-wrprotect-disable"
1139788778b0SMasahiro Yamada 	 */
1140426e95d1SSimon Horman 	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
11417c53b797SMasahiro Yamada 		mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
1142426e95d1SSimon Horman }
1143426e95d1SSimon Horman 
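/*
 * Allocate and minimally initialise a TMIO host: map the first MMIO
 * resource, allocate the mmc_host, parse generic and TMIO DT properties
 * and stash the result as platform driver data.
 *
 * A glue driver would typically use the alloc/probe/free trio roughly like
 * this (illustrative sketch only, SoC specifics and error paths trimmed;
 * my_dma_ops is a hypothetical name):
 *
 *	host = tmio_mmc_host_alloc(pdev, pdata);
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->dma_ops = my_dma_ops;
 *	ret = tmio_mmc_host_probe(host);
 *	if (ret)
 *		tmio_mmc_host_free(host);
 */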
1144b21fc294SMasahiro Yamada struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
1145b21fc294SMasahiro Yamada 					  struct tmio_mmc_data *pdata)
1146426e95d1SSimon Horman {
1147426e95d1SSimon Horman 	struct tmio_mmc_host *host;
1148426e95d1SSimon Horman 	struct mmc_host *mmc;
11498d09a133SMasahiro Yamada 	struct resource *res;
11508d09a133SMasahiro Yamada 	void __iomem *ctl;
11516fb294f7SMasahiro Yamada 	int ret;
11528d09a133SMasahiro Yamada 
11538d09a133SMasahiro Yamada 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
11548d09a133SMasahiro Yamada 	ctl = devm_ioremap_resource(&pdev->dev, res);
11558d09a133SMasahiro Yamada 	if (IS_ERR(ctl))
11568d09a133SMasahiro Yamada 		return ERR_CAST(ctl);
1157426e95d1SSimon Horman 
1158426e95d1SSimon Horman 	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
1159426e95d1SSimon Horman 	if (!mmc)
11608d09a133SMasahiro Yamada 		return ERR_PTR(-ENOMEM);
1161426e95d1SSimon Horman 
1162426e95d1SSimon Horman 	host = mmc_priv(mmc);
11638d09a133SMasahiro Yamada 	host->ctl = ctl;
1164426e95d1SSimon Horman 	host->mmc = mmc;
1165426e95d1SSimon Horman 	host->pdev = pdev;
1166b21fc294SMasahiro Yamada 	host->pdata = pdata;
1167c055fc75SMasahiro Yamada 	host->ops = tmio_mmc_ops;
1168c055fc75SMasahiro Yamada 	mmc->ops = &host->ops;
1169426e95d1SSimon Horman 
11706fb294f7SMasahiro Yamada 	ret = mmc_of_parse(host->mmc);
11716fb294f7SMasahiro Yamada 	if (ret) {
11726fb294f7SMasahiro Yamada 		host = ERR_PTR(ret);
11736fb294f7SMasahiro Yamada 		goto free;
11746fb294f7SMasahiro Yamada 	}
11756fb294f7SMasahiro Yamada 
11767c53b797SMasahiro Yamada 	tmio_mmc_of_parse(pdev, mmc);
11776fb294f7SMasahiro Yamada 
1178b21fc294SMasahiro Yamada 	platform_set_drvdata(pdev, host);
1179b21fc294SMasahiro Yamada 
1180426e95d1SSimon Horman 	return host;
11816fb294f7SMasahiro Yamada free:
11826fb294f7SMasahiro Yamada 	mmc_free_host(mmc);
11836fb294f7SMasahiro Yamada 
11846fb294f7SMasahiro Yamada 	return host;
1185426e95d1SSimon Horman }
11866106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);
1187426e95d1SSimon Horman 
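/* Counterpart to tmio_mmc_host_alloc(): release the mmc_host again */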
1188426e95d1SSimon Horman void tmio_mmc_host_free(struct tmio_mmc_host *host)
1189426e95d1SSimon Horman {
1190426e95d1SSimon Horman 	mmc_free_host(host->mmc);
1191426e95d1SSimon Horman }
11926106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
1193426e95d1SSimon Horman 
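/*
 * Bring the controller up: validate the platform data, determine the OCR
 * mask, set the transfer limits, mask and route interrupts, request DMA,
 * enable runtime PM and finally register the host with the MMC core.
 */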
1194bc45719cSMasahiro Yamada int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
1195426e95d1SSimon Horman {
1196426e95d1SSimon Horman 	struct platform_device *pdev = _host->pdev;
1197b21fc294SMasahiro Yamada 	struct tmio_mmc_data *pdata = _host->pdata;
1198426e95d1SSimon Horman 	struct mmc_host *mmc = _host->mmc;
1199426e95d1SSimon Horman 	int ret;
1200426e95d1SSimon Horman 	u32 irq_mask = TMIO_MASK_CMD;
1201426e95d1SSimon Horman 
1202b21fc294SMasahiro Yamada 	/*
1203b21fc294SMasahiro Yamada 	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
1204b21fc294SMasahiro Yamada 	 * looping forever...
1205b21fc294SMasahiro Yamada 	 */
1206b21fc294SMasahiro Yamada 	if (mmc->f_min == 0)
1207b21fc294SMasahiro Yamada 		return -EINVAL;
1208b21fc294SMasahiro Yamada 
1209426e95d1SSimon Horman 	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
1210426e95d1SSimon Horman 		_host->write16_hook = NULL;
1211426e95d1SSimon Horman 
1212426e95d1SSimon Horman 	_host->set_pwr = pdata->set_pwr;
1213426e95d1SSimon Horman 	_host->set_clk_div = pdata->set_clk_div;
1214426e95d1SSimon Horman 
1215426e95d1SSimon Horman 	ret = tmio_mmc_init_ocr(_host);
1216426e95d1SSimon Horman 	if (ret < 0)
1217426e95d1SSimon Horman 		return ret;
1218426e95d1SSimon Horman 
1219cd82cd21SMasahiro Yamada 	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
1220cd82cd21SMasahiro Yamada 		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
1221cd82cd21SMasahiro Yamada 		if (ret)
1222cd82cd21SMasahiro Yamada 			return ret;
1223cd82cd21SMasahiro Yamada 	}
1224cd82cd21SMasahiro Yamada 
1225426e95d1SSimon Horman 	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
1226426e95d1SSimon Horman 	mmc->caps2 |= pdata->capabilities2;
1227603aa14dSYoshihiro Shimoda 	mmc->max_segs = pdata->max_segs ? : 32;
1228426e95d1SSimon Horman 	mmc->max_blk_size = 512;
1229603aa14dSYoshihiro Shimoda 	mmc->max_blk_count = pdata->max_blk_count ? :
1230603aa14dSYoshihiro Shimoda 		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
1231426e95d1SSimon Horman 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1232e90e8da7SYoshihiro Shimoda 	/*
1233e90e8da7SYoshihiro Shimoda 	 * swiotlb limits the size of a single mapping. Since there is no
1234e90e8da7SYoshihiro Shimoda 	 * API to query that limit yet, calculate the maximum size locally,
1235e90e8da7SYoshihiro Shimoda 	 * compare it with the current max_req_size and, as a workaround,
1236e90e8da7SYoshihiro Shimoda 	 * reduce max_req_size if necessary.
1237e90e8da7SYoshihiro Shimoda 	 */
1238e90e8da7SYoshihiro Shimoda 	if (swiotlb_max_segment()) {
1239e90e8da7SYoshihiro Shimoda 		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
1240e90e8da7SYoshihiro Shimoda 
1241e90e8da7SYoshihiro Shimoda 		if (mmc->max_req_size > max_size)
1242e90e8da7SYoshihiro Shimoda 			mmc->max_req_size = max_size;
1243e90e8da7SYoshihiro Shimoda 	}
1244426e95d1SSimon Horman 	mmc->max_seg_size = mmc->max_req_size;
1245426e95d1SSimon Horman 
12461910b87fSMasahiro Yamada 	if (mmc_can_gpio_ro(mmc))
12471910b87fSMasahiro Yamada 		_host->ops.get_ro = mmc_gpio_get_ro;
12481910b87fSMasahiro Yamada 
1249497d1f96SMasahiro Yamada 	if (mmc_can_gpio_cd(mmc))
1250497d1f96SMasahiro Yamada 		_host->ops.get_cd = mmc_gpio_get_cd;
1251497d1f96SMasahiro Yamada 
1252de21dc1dSMasahiro Yamada 	_host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
1253426e95d1SSimon Horman 				  mmc->caps & MMC_CAP_NEEDS_POLL ||
1254426e95d1SSimon Horman 				  !mmc_card_is_removable(mmc));
1255426e95d1SSimon Horman 
1256426e95d1SSimon Horman 	/*
1257426e95d1SSimon Horman 	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
1258426e95d1SSimon Horman 	 * hotplug gets disabled. It seems to be Runtime PM related, but needs
1259426e95d1SSimon Horman 	 * further research. Since a PM overhaul is planned anyway, keep the
1260426e95d1SSimon Horman 	 * device active for now by always enabling native hotplug.
1261426e95d1SSimon Horman 	 */
1262426e95d1SSimon Horman 	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
1263426e95d1SSimon Horman 		_host->native_hotplug = true;
1264426e95d1SSimon Horman 
1265426e95d1SSimon Horman 	/*
1266426e95d1SSimon Horman 	 * While using internal tmio hardware logic for card detection, we need
1267426e95d1SSimon Horman 	 * to ensure it stays powered for it to work.
1268426e95d1SSimon Horman 	 */
1269426e95d1SSimon Horman 	if (_host->native_hotplug)
1270426e95d1SSimon Horman 		pm_runtime_get_noresume(&pdev->dev);
1271426e95d1SSimon Horman 
127286beb538SWolfram Sang 	_host->sdio_irq_enabled = false;
127386beb538SWolfram Sang 	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
127486beb538SWolfram Sang 		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
127586beb538SWolfram Sang 
1276426e95d1SSimon Horman 	tmio_mmc_clk_stop(_host);
1277426e95d1SSimon Horman 	tmio_mmc_reset(_host);
1278426e95d1SSimon Horman 
1279426e95d1SSimon Horman 	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
1280426e95d1SSimon Horman 	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
1281426e95d1SSimon Horman 
1282426e95d1SSimon Horman 	_host->sdcard_irq_mask &= ~irq_mask;
1283426e95d1SSimon Horman 
1284c7cd630aSMasahiro Yamada 	if (_host->native_hotplug)
1285c7cd630aSMasahiro Yamada 		tmio_mmc_enable_mmc_irqs(_host,
1286c7cd630aSMasahiro Yamada 				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
1287c7cd630aSMasahiro Yamada 
1288426e95d1SSimon Horman 	spin_lock_init(&_host->lock);
1289426e95d1SSimon Horman 	mutex_init(&_host->ios_lock);
1290426e95d1SSimon Horman 
1291426e95d1SSimon Horman 	/* Init delayed work for request timeouts */
1292426e95d1SSimon Horman 	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
1293426e95d1SSimon Horman 	INIT_WORK(&_host->done, tmio_mmc_done_work);
1294426e95d1SSimon Horman 
1295426e95d1SSimon Horman 	/* See if we also get DMA */
1296426e95d1SSimon Horman 	tmio_mmc_request_dma(_host, pdata);
1297426e95d1SSimon Horman 
1298426e95d1SSimon Horman 	pm_runtime_set_active(&pdev->dev);
1299426e95d1SSimon Horman 	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1300426e95d1SSimon Horman 	pm_runtime_use_autosuspend(&pdev->dev);
1301426e95d1SSimon Horman 	pm_runtime_enable(&pdev->dev);
1302426e95d1SSimon Horman 
1303426e95d1SSimon Horman 	ret = mmc_add_host(mmc);
13047f8e446bSMarkus Elfring 	if (ret)
13057f8e446bSMarkus Elfring 		goto remove_host;
1306426e95d1SSimon Horman 
1307426e95d1SSimon Horman 	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
1308426e95d1SSimon Horman 
1309426e95d1SSimon Horman 	return 0;
13107f8e446bSMarkus Elfring 
13117f8e446bSMarkus Elfring remove_host:
13127f8e446bSMarkus Elfring 	tmio_mmc_host_remove(_host);
13137f8e446bSMarkus Elfring 	return ret;
1314426e95d1SSimon Horman }
13156106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
1316426e95d1SSimon Horman 
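/*
 * Tear the host down: quiesce SDIO interrupts, unregister from the MMC
 * core, cancel outstanding work, release DMA and drop the runtime PM
 * references taken at probe time.
 */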
1317426e95d1SSimon Horman void tmio_mmc_host_remove(struct tmio_mmc_host *host)
1318426e95d1SSimon Horman {
1319426e95d1SSimon Horman 	struct platform_device *pdev = host->pdev;
1320426e95d1SSimon Horman 	struct mmc_host *mmc = host->mmc;
1321426e95d1SSimon Horman 
1322426e95d1SSimon Horman 	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
1323426e95d1SSimon Horman 		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
1324426e95d1SSimon Horman 
1325426e95d1SSimon Horman 	if (!host->native_hotplug)
1326426e95d1SSimon Horman 		pm_runtime_get_sync(&pdev->dev);
1327426e95d1SSimon Horman 
1328426e95d1SSimon Horman 	dev_pm_qos_hide_latency_limit(&pdev->dev);
1329426e95d1SSimon Horman 
1330426e95d1SSimon Horman 	mmc_remove_host(mmc);
1331426e95d1SSimon Horman 	cancel_work_sync(&host->done);
1332426e95d1SSimon Horman 	cancel_delayed_work_sync(&host->delayed_reset_work);
1333426e95d1SSimon Horman 	tmio_mmc_release_dma(host);
1334426e95d1SSimon Horman 
1335426e95d1SSimon Horman 	pm_runtime_put_sync(&pdev->dev);
1336426e95d1SSimon Horman 	pm_runtime_disable(&pdev->dev);
1337426e95d1SSimon Horman }
13386106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
1339426e95d1SSimon Horman 
1340426e95d1SSimon Horman #ifdef CONFIG_PM
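/* Thin wrappers around the optional platform clock hooks */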
13414a09d0b8SArnd Bergmann static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
13424a09d0b8SArnd Bergmann {
13434a09d0b8SArnd Bergmann 	if (!host->clk_enable)
13444a09d0b8SArnd Bergmann 		return -ENOTSUPP;
13454a09d0b8SArnd Bergmann 
13464a09d0b8SArnd Bergmann 	return host->clk_enable(host);
13474a09d0b8SArnd Bergmann }
13484a09d0b8SArnd Bergmann 
13494a09d0b8SArnd Bergmann static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
13504a09d0b8SArnd Bergmann {
13514a09d0b8SArnd Bergmann 	if (host->clk_disable)
13524a09d0b8SArnd Bergmann 		host->clk_disable(host);
13534a09d0b8SArnd Bergmann }
13544a09d0b8SArnd Bergmann 
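/*
 * Runtime suspend: mask all card interrupts, stop the card clock if it was
 * running and gate the interface clock.
 */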
1355426e95d1SSimon Horman int tmio_mmc_host_runtime_suspend(struct device *dev)
1356426e95d1SSimon Horman {
1357a3b05373SMasahiro Yamada 	struct tmio_mmc_host *host = dev_get_drvdata(dev);
1358426e95d1SSimon Horman 
1359426e95d1SSimon Horman 	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
1360426e95d1SSimon Horman 
1361426e95d1SSimon Horman 	if (host->clk_cache)
1362426e95d1SSimon Horman 		tmio_mmc_clk_stop(host);
1363426e95d1SSimon Horman 
1364426e95d1SSimon Horman 	tmio_mmc_clk_disable(host);
1365426e95d1SSimon Horman 
1366426e95d1SSimon Horman 	return 0;
1367426e95d1SSimon Horman }
13686106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);
1369426e95d1SSimon Horman 
1370426e95d1SSimon Horman static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
1371426e95d1SSimon Horman {
1372426e95d1SSimon Horman 	return host->tap_num && mmc_can_retune(host->mmc);
1373426e95d1SSimon Horman }
1374426e95d1SSimon Horman 
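/*
 * Runtime resume: reset the controller, re-enable the clocks, restore the
 * cached card clock and the hotplug interrupts, re-enable DMA and re-select
 * the tuning tap when retuning is possible.
 */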
1375426e95d1SSimon Horman int tmio_mmc_host_runtime_resume(struct device *dev)
1376426e95d1SSimon Horman {
1377a3b05373SMasahiro Yamada 	struct tmio_mmc_host *host = dev_get_drvdata(dev);
1378426e95d1SSimon Horman 
1379426e95d1SSimon Horman 	tmio_mmc_reset(host);
1380426e95d1SSimon Horman 	tmio_mmc_clk_enable(host);
1381426e95d1SSimon Horman 
1382426e95d1SSimon Horman 	if (host->clk_cache)
1383426e95d1SSimon Horman 		tmio_mmc_set_clock(host, host->clk_cache);
1384426e95d1SSimon Horman 
1385c7cd630aSMasahiro Yamada 	if (host->native_hotplug)
1386c7cd630aSMasahiro Yamada 		tmio_mmc_enable_mmc_irqs(host,
1387c7cd630aSMasahiro Yamada 				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
1388c7cd630aSMasahiro Yamada 
1389426e95d1SSimon Horman 	tmio_mmc_enable_dma(host, true);
1390426e95d1SSimon Horman 
1391426e95d1SSimon Horman 	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
1392426e95d1SSimon Horman 		dev_warn(&host->pdev->dev, "Tuning selection failed\n");
1393426e95d1SSimon Horman 
1394426e95d1SSimon Horman 	return 0;
1395426e95d1SSimon Horman }
13966106ecf3SSimon Horman EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
1397426e95d1SSimon Horman #endif
1398426e95d1SSimon Horman 
1399426e95d1SSimon Horman MODULE_LICENSE("GPL v2");
1400