/*
 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
 *
 * Copyright (C) 2006 Texas Instruments.
 *       Original author: Purushotam Kumar
 * Copyright (C) 2009 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/platform_data/mmc-davinci.h>

/*
 * Register Definitions
 */
#define DAVINCI_MMCCTL       0x00 /* Control Register                  */
#define DAVINCI_MMCCLK       0x04 /* Memory Clock Control Register     */
#define DAVINCI_MMCST0       0x08 /* Status Register 0                 */
#define DAVINCI_MMCST1       0x0C /* Status Register 1                 */
#define DAVINCI_MMCIM        0x10 /* Interrupt Mask Register           */
#define DAVINCI_MMCTOR       0x14 /* Response Time-Out Register        */
#define DAVINCI_MMCTOD       0x18 /* Data Read Time-Out Register       */
#define DAVINCI_MMCBLEN      0x1C /* Block Length Register             */
#define DAVINCI_MMCNBLK      0x20 /* Number of Blocks Register         */
#define DAVINCI_MMCNBLC      0x24 /* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR       0x28 /* Data Receive Register             */
#define DAVINCI_MMCDXR       0x2C /* Data Transmit Register            */
#define DAVINCI_MMCCMD       0x30 /* Command Register                  */
#define DAVINCI_MMCARGHL     0x34 /* Argument Register                 */
#define DAVINCI_MMCRSP01     0x38 /* Response Register 0 and 1         */
#define DAVINCI_MMCRSP23     0x3C /* Response Register 2 and 3         */
#define DAVINCI_MMCRSP45     0x40 /* Response Register 4 and 5         */
#define DAVINCI_MMCRSP67     0x44 /* Response Register 6 and 7         */
#define DAVINCI_MMCDRSP      0x48 /* Data Response Register            */
#define DAVINCI_MMCETOK      0x4C
#define DAVINCI_MMCCIDX      0x50 /* Command Index Register            */
#define DAVINCI_MMCCKC       0x54
#define DAVINCI_MMCTORC      0x58
#define DAVINCI_MMCTODC      0x5C
#define DAVINCI_MMCBLNC      0x60
#define DAVINCI_SDIOCTL      0x64
#define DAVINCI_SDIOST0      0x68
#define DAVINCI_SDIOIEN      0x6C
#define DAVINCI_SDIOIST      0x70
#define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */

/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST         (1 << 0)
#define MMCCTL_CMDRST         (1 << 1)
#define MMCCTL_WIDTH_8_BIT    (1 << 8)
#define MMCCTL_WIDTH_4_BIT    (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING   (1 << 6)
#define MMCCTL_DATEG_FALLING  (2 << 6)
#define MMCCTL_DATEG_BOTH     (3 << 6)
#define MMCCTL_PERMDR_LE      (0 << 9)
#define MMCCTL_PERMDR_BE      (1 << 9)
#define MMCCTL_PERMDX_LE      (0 << 10)
#define MMCCTL_PERMDX_BE      (1 << 10)

/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN          (1 << 8)
#define MMCCLK_CLKRT_MASK     (0xFF << 0)

/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE         BIT(0)	/* data done */
#define MMCST0_BSYDNE         BIT(1)	/* busy done */
#define MMCST0_RSPDNE         BIT(2)	/* command done */
#define MMCST0_TOUTRD         BIT(3)	/* data read timeout */
#define MMCST0_TOUTRS         BIT(4)	/* command response timeout */
#define MMCST0_CRCWR          BIT(5)	/* data write CRC error */
#define MMCST0_CRCRD          BIT(6)	/* data read CRC error */
#define MMCST0_CRCRS          BIT(7)	/* command response CRC error */
#define MMCST0_DXRDY          BIT(9)	/* data transmit ready (fifo empty) */
#define MMCST0_DRRDY          BIT(10)	/* data receive ready (data in fifo) */
#define MMCST0_DATED          BIT(11)	/* DAT3 edge detect */
#define MMCST0_TRNDNE         BIT(12)	/* transfer done */

/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY           (1 << 0)

/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK       (0x3F << 0)
#define MMCCMD_PPLEN          (1 << 7)
#define MMCCMD_BSYEXP         (1 << 8)
#define MMCCMD_RSPFMT_MASK    (3 << 9)
#define MMCCMD_RSPFMT_NONE    (0 << 9)
#define MMCCMD_RSPFMT_R1456   (1 << 9)
#define MMCCMD_RSPFMT_R2      (2 << 9)
#define MMCCMD_RSPFMT_R3      (3 << 9)
#define MMCCMD_DTRW           (1 << 11)
#define MMCCMD_STRMTP         (1 << 12)
#define MMCCMD_WDATX          (1 << 13)
#define MMCCMD_INITCK         (1 << 14)
#define MMCCMD_DCLR           (1 << 15)
#define MMCCMD_DMATRIG        (1 << 16)

/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST    (1 << 0)
#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
#define MMCFIFOCTL_FIFOLEV    (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4    (0 << 3) /* access width of 4 bytes    */
#define MMCFIFOCTL_ACCWD_3    (1 << 3) /* access width of 3 bytes    */
#define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
#define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */

/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI       BIT(0)

/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN       BIT(0)

/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT         BIT(0)

/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK		200000

/*
 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
 * and we handle up to MAX_NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
 * for drivers with max_segs == 1, making the segments bigger (64KB)
 * than the page or two that's otherwise typical. nr_sg (passed from
 * platform data) == 16 gives at least the same throughput boost, using
 * EDMA transfer linkage instead of spending CPU time copying pages.
 */
#define MAX_CCNT	((1 << 16) - 1)

#define MAX_NR_SG	16
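
/*
 * Worked example: with the default rw_threshold of 32 bytes, one DMA
 * segment can cover up to MAX_CCNT * 32 = 65535 * 32 bytes, i.e. just
 * under 2 MiB; with rw_threshold = 64 it is just under 4 MiB.
 */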

static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
		"Read/Write threshold. Default = 32");

static unsigned poll_threshold = 128;
module_param(poll_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(poll_threshold,
		 "Polling transaction size threshold. Default = 128");

static unsigned poll_loopcount = 32;
module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
		 "Maximum polling loop count. Default = 32");

static unsigned __initdata use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
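
/*
 * Usage example (hypothetical values, for illustration only):
 *	modprobe davinci_mmc rw_threshold=64 poll_threshold=0
 * doubles the FIFO burst size and disables the polled completion fast
 * path.  rw_threshold should match a FIFO level the controller
 * supports (32, or 64 on version 2 controllers); see the fifo_lev
 * logic in mmc_davinci_prepare_data().
 */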

struct mmc_davinci_host {
	struct mmc_command *cmd;
	struct mmc_data *data;
	struct mmc_host *mmc;
	struct clk *clk;
	unsigned int mmc_input_clk;
	void __iomem *base;
	struct resource *mem_res;
	int mmc_irq, sdio_irq;
	unsigned char bus_mode;

#define DAVINCI_MMC_DATADIR_NONE	0
#define DAVINCI_MMC_DATADIR_READ	1
#define DAVINCI_MMC_DATADIR_WRITE	2
	unsigned char data_dir;

	/* buffer is used during PIO of one scatterlist segment, and
	 * is updated along with buffer_bytes_left.  bytes_left applies
	 * to all N blocks of the PIO transfer.
	 */
	u8 *buffer;
	u32 buffer_bytes_left;
	u32 bytes_left;

	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	bool use_dma;
	bool do_dma;
	bool sdio_int;
	bool active_request;

	/* For PIO we walk scatterlists one segment at a time. */
	unsigned int		sg_len;
	struct scatterlist *sg;

	/* Version of the MMC/SD controller */
	u8 version;
	/* for ns in one cycle calculation */
	unsigned ns_in_one_cycle;
	/* Number of sg segments */
	u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block	freq_transition;
#endif
};

static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);

/* PIO only */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
	host->buffer_bytes_left = sg_dma_len(host->sg);
	host->buffer = sg_virt(host->sg);
	if (host->buffer_bytes_left > host->bytes_left)
		host->buffer_bytes_left = host->bytes_left;
}

static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
					unsigned int n)
{
	u8 *p;
	unsigned int i;

	if (host->buffer_bytes_left == 0) {
		host->sg = sg_next(host->sg);
		mmc_davinci_sg_to_buf(host);
	}

	p = host->buffer;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->bytes_left -= n;

	/* NOTE:  we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that the access width (i.e. ACCWD) is 4 bytes.
	 */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		for (i = 0; i < (n >> 2); i++) {
			writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
			p = p + 4;
		}
		if (n & 3) {
			iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
			p = p + (n & 3);
		}
	} else {
		for (i = 0; i < (n >> 2); i++) {
			*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
			p = p + 4;
		}
		if (n & 3) {
			ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
			p = p + (n & 3);
		}
	}
	host->buffer = p;
}

static void mmc_davinci_start_command(struct mmc_davinci_host *host,
		struct mmc_command *cmd)
{
	u32 cmd_reg = 0;
	u32 im_val;

	dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
		cmd->opcode, cmd->arg,
		({ char *s;
		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_R1:
			s = ", R1/R5/R6/R7 response";
			break;
		case MMC_RSP_R1B:
			s = ", R1b response";
			break;
		case MMC_RSP_R2:
			s = ", R2 response";
			break;
		case MMC_RSP_R3:
			s = ", R3/R4 response";
			break;
		default:
			s = ", (R? response)";
			break;
		} s; }));
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
		/* There's some spec confusion about when R1B is
		 * allowed, but if the card doesn't issue a BUSY
		 * then it's harmless for us to allow it.
		 */
		cmd_reg |= MMCCMD_BSYEXP;
		/* FALLTHROUGH */
	case MMC_RSP_R1:		/* 48 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R1456;
		break;
	case MMC_RSP_R2:		/* 136 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R2;
		break;
	case MMC_RSP_R3:		/* 48 bits, no CRC */
		cmd_reg |= MMCCMD_RSPFMT_R3;
		break;
	default:
		cmd_reg |= MMCCMD_RSPFMT_NONE;
		dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	/* Set command index */
	cmd_reg |= cmd->opcode;

	/* Enable EDMA transfer triggers */
	if (host->do_dma)
		cmd_reg |= MMCCMD_DMATRIG;

	if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
			host->data_dir == DAVINCI_MMC_DATADIR_READ)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Set whether the command involves a data transfer */
	if (cmd->data)
		cmd_reg |= MMCCMD_WDATX;

	/* Set whether the data transfer is a read or a write */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
		cmd_reg |= MMCCMD_DTRW;

	if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
		cmd_reg |= MMCCMD_PPLEN;

	/* Set the command timeout */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);

	/* Enable interrupts (calculate here, defer until FIFO is stuffed). */
	im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

		if (!host->do_dma)
			im_val |= MMCST0_DXRDY;
	} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

		if (!host->do_dma)
			im_val |= MMCST0_DRRDY;
	}

	/*
	 * Before non-DMA WRITE commands the controller needs priming:
	 * the FIFO must be populated with rw_threshold bytes (i.e. the
	 * FIFO size).
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg, host->base + DAVINCI_MMCCMD);

	host->active_request = true;

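	/*
	 * Fast path for small PIO transfers: poll the interrupt handler
	 * for up to poll_loopcount iterations so short requests can
	 * complete without interrupt latency.  If the request finishes
	 * here, active_request goes false and the interrupt-mask write
	 * below is skipped.
	 */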
	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
			cpu_relax();
		}
	}

	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}

/*----------------------------------------------------------------------*/

/* DMA infrastructure */
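
/*
 * The flow below is the standard dmaengine one: configure the slave
 * side (FIFO address, 4-byte access width, burst of rw_threshold
 * bytes), prepare a slave_sg descriptor over the mapped scatterlist,
 * submit it, then issue_pending to start the transfer.  No DMA
 * completion callback is installed; the controller's own data
 * interrupts drive completion instead.
 */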

static void davinci_abort_dma(struct mmc_davinci_host *host)
{
	struct dma_chan *sync_dev;

	if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
		sync_dev = host->dma_rx;
	else
		sync_dev = host->dma_tx;

	dmaengine_terminate_all(sync_dev);
}

static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	int ret = 0;

	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_tx;
		dmaengine_slave_config(host->dma_tx, &dma_tx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_tx,
				data->sg,
				host->sg_len,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA TX descriptor\n");
			ret = -ENOMEM;
			goto out;
		}
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = host->mem_res->start + DAVINCI_MMCDRR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_rx;
		dmaengine_slave_config(host->dma_rx, &dma_rx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_rx,
				data->sg,
				host->sg_len,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA RX descriptor\n");
			ret = -ENOMEM;
			goto out;
		}
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

out:
	return ret;
}

static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	int i;
	int mask = rw_threshold - 1;
	int ret = 0;

	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE));

	/* no individual DMA segment should need a partial FIFO */
	for (i = 0; i < host->sg_len; i++) {
		if (sg_dma_len(data->sg + i) & mask) {
			dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_WRITE)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			return -EINVAL;
		}
	}

	host->do_dma = 1;
	ret = mmc_davinci_send_dma_request(host, data);

	return ret;
}

static void __init_or_module
davinci_release_dma_channels(struct mmc_davinci_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

/*----------------------------------------------------------------------*/

static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
	int timeout;
	struct mmc_data *data = req->data;

	if (host->version == MMC_CTLR_VERSION_2)
		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

	host->data = data;
	if (data == NULL) {
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		writel(0, host->base + DAVINCI_MMCBLEN);
		writel(0, host->base + DAVINCI_MMCNBLK);
		return;
	}

	dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n",
		(data->flags & MMC_DATA_WRITE) ? "write" : "read",
		data->blocks, data->blksz);
	dev_dbg(mmc_dev(host->mmc), "  DTO %d cycles + %d ns\n",
		data->timeout_clks, data->timeout_ns);
	timeout = data->timeout_clks +
		(data->timeout_ns / host->ns_in_one_cycle);
	if (timeout > 0xffff)
		timeout = 0xffff;

	writel(timeout, host->base + DAVINCI_MMCTOD);
	writel(data->blocks, host->base + DAVINCI_MMCNBLK);
	writel(data->blksz, host->base + DAVINCI_MMCBLEN);

	/* Configure the FIFO */
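	/*
	 * Toggle FIFORST while programming the direction: the first
	 * write asserts the reset bit to flush the FIFO, the second
	 * clears it so the chosen level and direction take effect.
	 */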
	if (data->flags & MMC_DATA_WRITE) {
		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
			host->base + DAVINCI_MMCFIFOCTL);
	} else {
		host->data_dir = DAVINCI_MMC_DATADIR_READ;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
			host->base + DAVINCI_MMCFIFOCTL);
	}

	host->buffer = NULL;
	host->bytes_left = data->blocks * data->blksz;

	/* For now we try to use DMA whenever we won't need partial FIFO
	 * reads or writes, either for the whole transfer (as tested here)
	 * or for any individual scatterlist segment (tested when we call
	 * start_dma_transfer).
	 *
	 * While we *could* change that, unusual block sizes are rarely
	 * used.  The occasional fallback to PIO shouldn't hurt.
	 */
	if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
			&& mmc_davinci_start_dma_transfer(host, data) == 0) {
		/* zero this to ensure we take no PIO paths */
		host->bytes_left = 0;
	} else {
		/* Revert to CPU copy */
		host->sg_len = data->sg_len;
		host->sg = host->data->sg;
		mmc_davinci_sg_to_buf(host);
	}
}

static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	unsigned long timeout = jiffies + msecs_to_jiffies(900);
	u32 mmcst1 = 0;

	/* Card may still be sending BUSY after a previous operation,
	 * typically some kind of write.  If so, we can't proceed yet.
	 */
	while (time_before(jiffies, timeout)) {
		mmcst1 = readl(host->base + DAVINCI_MMCST1);
		if (!(mmcst1 & MMCST1_BUSY))
			break;
		cpu_relax();
	}
	if (mmcst1 & MMCST1_BUSY) {
		dev_err(mmc_dev(host->mmc), "still BUSY? bad ...\n");
		req->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, req);
		return;
	}

	host->do_dma = 0;
	mmc_davinci_prepare_data(host, req);
	mmc_davinci_start_command(host, req->cmd);
}

static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
	unsigned int mmc_req_freq)
{
	unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;

	mmc_pclk = host->mmc_input_clk;
	if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
		mmc_push_pull_divisor = ((unsigned int)mmc_pclk
				/ (2 * mmc_req_freq)) - 1;
	else
		mmc_push_pull_divisor = 0;

	mmc_freq = (unsigned int)mmc_pclk
		/ (2 * (mmc_push_pull_divisor + 1));

	if (mmc_freq > mmc_req_freq)
		mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
	/* Compute ns_in_one_cycle, used to convert data timeouts from
	 * ns into clock cycles in mmc_davinci_prepare_data().
	 */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000));
	else
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000000));

	return mmc_push_pull_divisor;
}

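/*
 * The MMCCLK divider yields a card clock of
 *	mmc_input_clk / (2 * (CLKRT + 1))
 * so, purely for illustration, an assumed 150 MHz module clock with
 * CLKRT = 2 gives 25 MHz.
 */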
static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
	unsigned int open_drain_freq = 0, mmc_pclk;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);

	mmc_pclk = host->mmc_input_clk;

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		u32 temp;

		/* The init clock value passed in is ignored, to improve
		 * interoperability with different cards.
		 */
		open_drain_freq = ((unsigned int)mmc_pclk
				/ (2 * MMCSD_INIT_CLOCK)) - 1;

		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= open_drain_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Compute the duration of one card clock cycle in ns */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
	} else {
		u32 temp;

		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
		writel(temp, host->base + DAVINCI_MMCCLK);

		udelay(10);

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

		udelay(10);
	}
}

static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	dev_dbg(mmc_dev(host->mmc),
		"clock %dHz busmode %d powermode %d Vdd %04x\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (config && config->set_power)
			config->set_power(pdev->id, false);
		break;
	case MMC_POWER_UP:
		if (config && config->set_power)
			config->set_power(pdev->id, true);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
		writel((readl(host->base + DAVINCI_MMCCTL) &
			~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
			host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_4:
		dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel((readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) |
				MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_1:
		dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		unsigned long timeout = jiffies + msecs_to_jiffies(50);
		bool lose = true;

		/* Send clock cycles, poll completion */
		writel(0, host->base + DAVINCI_MMCARGHL);
		writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
		while (time_before(jiffies, timeout)) {
			u32 tmp = readl(host->base + DAVINCI_MMCST0);

			if (tmp & MMCST0_RSPDNE) {
				lose = false;
				break;
			}
			cpu_relax();
		}
		if (lose)
			dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
	}

	/* FIXME on power OFF, reset things ... */
}

static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
	host->data = NULL;

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
		/*
		 * SDIO Interrupt Detection work-around as suggested by
		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
		 */
		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
					SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		}
	}

	if (host->do_dma) {
		davinci_abort_dma(host);

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE)
			     ? DMA_TO_DEVICE
			     : DMA_FROM_DEVICE);
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	if (!data->stop || (host->cmd && host->cmd->error)) {
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}

static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
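			/*
			 * The 136-bit response arrives with its most
			 * significant word in RSP67, while the MMC core
			 * expects the most significant word in resp[0],
			 * hence the reversed register order below.
			 */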
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}

static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
								int val)
{
	u32 temp;

	temp = readl(host->base + DAVINCI_MMCCTL);
	if (val)	/* reset */
		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
	else		/* enable */
		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

	writel(temp, host->base + DAVINCI_MMCCTL);
	udelay(10);
}

static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}

static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int status;

	status = readl(host->base + DAVINCI_SDIOIST);
	if (status & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", status);
		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}

static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
	unsigned int status, qstatus;
	int end_command = 0;
	int end_transfer = 0;
	struct mmc_data *data = host->data;

	if (host->cmd == NULL && host->data == NULL) {
		status = readl(host->base + DAVINCI_MMCST0);
		dev_dbg(mmc_dev(host->mmc),
			"Spurious interrupt 0x%04x\n", status);
		/* Disable the interrupt from mmcsd */
		writel(0, host->base + DAVINCI_MMCIM);
		return IRQ_NONE;
	}

	status = readl(host->base + DAVINCI_MMCST0);
	qstatus = status;

	/* Handle the FIFO first when using PIO for data.
	 * bytes_left will decrease to zero as I/O progresses, and status
	 * will read as zero over iterations because this controller's
	 * status register (MMCST0) reports any status only once and is
	 * cleared by the read.  So this is not an unbounded loop, even
	 * in the non-DMA case.
	 */
	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
		unsigned long im_val;

		/*
		 * If interrupts fire during the following loop, they will be
		 * handled by the handler, but the PIC will still buffer these.
		 * As a result, the handler will be called again to serve these
		 * needlessly. In order to avoid these spurious interrupts,
		 * keep interrupts masked during the loop.
		 */
		im_val = readl(host->base + DAVINCI_MMCIM);
		writel(0, host->base + DAVINCI_MMCIM);

		do {
			davinci_fifo_data_trans(host, rw_threshold);
			status = readl(host->base + DAVINCI_MMCST0);
			qstatus |= status;
		} while (host->bytes_left &&
			 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

		/*
		 * If an interrupt is pending, it is assumed it will fire when
		 * it is unmasked. This assumption is also taken when the MMCIM
		 * is first set. Otherwise, writing to MMCIM after reading the
		 * status is race-prone.
		 */
		writel(im_val, host->base + DAVINCI_MMCIM);
	}

	if (qstatus & MMCST0_DATDNE) {
		/* All blocks sent/received, and CRC checks passed */
		if (data != NULL) {
			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
				/* if datasize < rw_threshold
				 * no RX ints are generated
				 */
				davinci_fifo_data_trans(host, host->bytes_left);
			}
			end_transfer = 1;
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_err(mmc_dev(host->mmc),
					"DATDNE with no host->data\n");
		}
	}

	if (qstatus & MMCST0_TOUTRD) {
		/* Read data timeout */
		data->error = -ETIMEDOUT;
		end_transfer = 1;

		dev_dbg(mmc_dev(host->mmc),
			"read data timeout, status %x\n",
			qstatus);

		davinci_abort_data(host, data);
	}

	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
		/* Data CRC error */
		data->error = -EILSEQ;
		end_transfer = 1;

		/* NOTE:  this controller uses CRCWR to report both CRC
		 * errors and timeouts (on writes).  MMCDRSP values are
		 * only weakly documented, but 0x9f was clearly a timeout
		 * case and the two three-bit patterns in various SD specs
		 * (101, 010) aren't part of it ...
		 */
		if (qstatus & MMCST0_CRCWR) {
			u32 temp = readb(host->base + DAVINCI_MMCDRSP);

			if (temp == 0x9f)
				data->error = -ETIMEDOUT;
		}
		dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
			(qstatus & MMCST0_CRCWR) ? "write" : "read",
			(data->error == -ETIMEDOUT) ? "timeout" : "CRC");

		davinci_abort_data(host, data);
	}

	if (qstatus & MMCST0_TOUTRS) {
		/* Command timeout */
		if (host->cmd) {
			dev_dbg(mmc_dev(host->mmc),
				"CMD%d timeout, status %x\n",
				host->cmd->opcode, qstatus);
			host->cmd->error = -ETIMEDOUT;
			if (data) {
				end_transfer = 1;
				davinci_abort_data(host, data);
			} else
				end_command = 1;
		}
	}

	if (qstatus & MMCST0_CRCRS) {
		/* Command CRC error */
		dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
		if (host->cmd) {
			host->cmd->error = -EILSEQ;
			end_command = 1;
		}
	}

	if (qstatus & MMCST0_RSPDNE) {
		/* End of command phase */
		end_command = (host->cmd != NULL);
	}

	if (end_command)
		mmc_davinci_cmd_done(host, host->cmd);
	if (end_transfer)
		mmc_davinci_xfer_done(host, data);
	return IRQ_HANDLED;
}

static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_cd)
		return -ENOSYS;
	return config->get_cd(pdev->id);
}

static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_ro)
		return -ENOSYS;
	return config->get_ro(pdev->id);
}

static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
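		/*
		 * If DAT1 is already low, an SDIO card interrupt is
		 * already pending: ack and signal it immediately rather
		 * than arming the hardware interrupt enable.
		 */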
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}

static struct mmc_host_ops mmc_davinci_ops = {
	.request	= mmc_davinci_request,
	.set_ios	= mmc_davinci_set_ios,
	.get_cd		= mmc_davinci_get_cd,
	.get_ro		= mmc_davinci_get_ro,
	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};

/*----------------------------------------------------------------------*/

#ifdef CONFIG_CPU_FREQ
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
				     unsigned long val, void *data)
{
	struct mmc_davinci_host *host;
	unsigned int mmc_pclk;
	struct mmc_host *mmc;
	unsigned long flags;

	host = container_of(nb, struct mmc_davinci_host, freq_transition);
	mmc = host->mmc;
	mmc_pclk = clk_get_rate(host->clk);

	if (val == CPUFREQ_POSTCHANGE) {
		spin_lock_irqsave(&mmc->lock, flags);
		host->mmc_input_clk = mmc_pclk;
		calculate_clk_divider(mmc, &mmc->ios);
		spin_unlock_irqrestore(&mmc->lock, flags);
	}

	return 0;
}

static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	return 0;
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
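
/*
 * Put the controller into a known state: hold the CMD and DAT state
 * machines in reset while the clock enable is cycled and maximal
 * response/data timeouts are programmed, then release the reset.
 */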
static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{
	mmc_davinci_reset_ctrl(host, 1);

	writel(0, host->base + DAVINCI_MMCCLK);
	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
	writel(0xFFFF, host->base + DAVINCI_MMCTOD);

	mmc_davinci_reset_ctrl(host, 0);
}

static const struct platform_device_id davinci_mmc_devtype[] = {
	{
		.name	= "dm6441-mmc",
		.driver_data = MMC_CTLR_VERSION_1,
	}, {
		.name	= "da830-mmc",
		.driver_data = MMC_CTLR_VERSION_2,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);

static const struct of_device_id davinci_mmc_dt_ids[] = {
	{
		.compatible = "ti,dm6441-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
	},
	{
		.compatible = "ti,da830-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
	},
	{},
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
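
/*
 * Illustrative only -- a device-tree node this table would match (the
 * unit address and property values below are made up for the example):
 *
 *	mmc@1c40000 {
 *		compatible = "ti,da830-mmc";
 *		bus-width = <4>;
 *		max-frequency = <25000000>;
 *	};
 */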

static struct davinci_mmc_config
	*mmc_parse_pdata(struct platform_device *pdev)
{
	struct device_node *np;
	struct davinci_mmc_config *pdata = pdev->dev.platform_data;
	const struct of_device_id *match =
		of_match_device(davinci_mmc_dt_ids, &pdev->dev);
	u32 data;

	np = pdev->dev.of_node;
	if (!np)
		return pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
		goto nodata;
	}

	if (match)
		pdev->id_entry = match->data;

	if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
		dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");

	if (of_property_read_u32(np, "bus-width", &data))
		data = 1;
	switch (data) {
	case 1:
	case 4:
	case 8:
		pdata->wires = data;
		break;
	default:
		pdata->wires = 1;
		dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
	}
nodata:
	return pdata;
}

static int __init davinci_mmcsd_probe(struct platform_device *pdev)
{
	struct davinci_mmc_config *pdata = NULL;
	struct mmc_davinci_host *host = NULL;
	struct mmc_host *mmc = NULL;
	struct resource *r, *mem = NULL;
	int ret, irq;
	size_t mem_size;
	const struct platform_device_id *id_entry;

	pdata = mmc_parse_pdata(pdev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "Couldn't get platform data\n");
		return -ENOENT;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq == NO_IRQ)
		return -ENODEV;

	mem_size = resource_size(r);
	mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
				      pdev->name);
	if (!mem)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;	/* Important */

	host->mem_res = mem;
	host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!host->base) {
		ret = -ENOMEM;
		goto ioremap_fail;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto clk_get_fail;
	}
	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_prepare_enable_fail;

	host->mmc_input_clk = clk_get_rate(host->clk);

	init_mmcsd_host(host);

	if (pdata->nr_sg)
		host->nr_sg = pdata->nr_sg - 1;

	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
		host->nr_sg = MAX_NR_SG;

	host->use_dma = use_dma;
	host->mmc_irq = irq;
	host->sdio_irq = platform_get_irq(pdev, 1);

	if (host->use_dma) {
		ret = davinci_acquire_dma_channels(host);
		if (ret == -EPROBE_DEFER)
			goto dma_probe_defer;
		else if (ret)
			host->use_dma = 0;
	}

	/* REVISIT:  someday, support IRQ-driven card detection.  */
	mmc->caps |= MMC_CAP_NEEDS_POLL;
	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

	if (pdata && (pdata->wires == 4 || pdata->wires == 0))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (pdata && (pdata->wires == 8))
		mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);

	id_entry = platform_get_device_id(pdev);
	if (id_entry)
		host->version = id_entry->driver_data;

	mmc->ops = &mmc_davinci_ops;
	mmc->f_min = 312500;
	mmc->f_max = 25000000;
	if (pdata && pdata->max_freq)
		mmc->f_max = pdata->max_freq;
	if (pdata && pdata->caps)
		mmc->caps |= pdata->caps;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* With no iommu coalescing pages, each phys_seg is a hw_seg.
	 * Each hw_seg uses one EDMA parameter RAM slot, always one
	 * channel and then usually some linked slots.
	 */
	mmc->max_segs		= MAX_NR_SG;

	/* EDMA limit per hw segment (about 2 MiB with the default
	 * rw_threshold of 32)
	 */
	mmc->max_seg_size	= MAX_CCNT * rw_threshold;

	/* MMC/SD controller limits for multiblock requests */
	mmc->max_blk_size	= 4095;  /* BLEN is 12 bits */
	mmc->max_blk_count	= 65535; /* NBLK is 16 bits */
	mmc->max_req_size	= mmc->max_blk_size * mmc->max_blk_count;

	dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
	dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
	dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
	dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);

	platform_set_drvdata(pdev, host);

	ret = mmc_davinci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto cpu_freq_fail;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto mmc_add_host_fail;

	ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
			       mmc_hostname(mmc), host);
	if (ret)
		goto request_irq_fail;

	if (host->sdio_irq >= 0) {
		ret = devm_request_irq(&pdev->dev, host->sdio_irq,
				       mmc_davinci_sdio_irq, 0,
				       mmc_hostname(mmc), host);
		if (!ret)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	rename_region(mem, mmc_hostname(mmc));

	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
		host->use_dma ? "DMA" : "PIO",
		(mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;

request_irq_fail:
	mmc_remove_host(mmc);
mmc_add_host_fail:
	mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
	davinci_release_dma_channels(host);
dma_probe_defer:
	clk_disable_unprepare(host->clk);
clk_prepare_enable_fail:
clk_get_fail:
ioremap_fail:
	mmc_free_host(mmc);

	return ret;
}

static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	mmc_remove_host(host->mmc);
	mmc_davinci_cpufreq_deregister(host);
	davinci_release_dma_channels(host);
	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);

	return 0;
}

#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	writel(0, host->base + DAVINCI_MMCIM);
	mmc_davinci_reset_ctrl(host, 1);
	clk_disable(host->clk);

	return 0;
}

static int davinci_mmcsd_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	clk_enable(host->clk);
	mmc_davinci_reset_ctrl(host, 0);

	return 0;
}

static const struct dev_pm_ops davinci_mmcsd_pm = {
	.suspend        = davinci_mmcsd_suspend,
	.resume         = davinci_mmcsd_resume,
};

#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif

static struct platform_driver davinci_mmcsd_driver = {
	.driver		= {
		.name	= "davinci_mmc",
		.pm	= davinci_mmcsd_pm_ops,
		.of_match_table = davinci_mmc_dt_ids,
	},
	.remove		= __exit_p(davinci_mmcsd_remove),
	.id_table	= davinci_mmc_devtype,
};

module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
MODULE_ALIAS("platform:davinci_mmc");