xref: /openbmc/linux/drivers/mmc/host/omap.c (revision 732a675a)
1 /*
2  *  linux/drivers/mmc/host/omap.c
3  *
4  *  Copyright (C) 2004 Nokia Corporation
5  *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6  *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7  *  Other hacks (DMA, SD, etc) by David Brownell
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/init.h>
17 #include <linux/ioport.h>
18 #include <linux/platform_device.h>
19 #include <linux/interrupt.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/delay.h>
22 #include <linux/spinlock.h>
23 #include <linux/timer.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/card.h>
26 #include <linux/clk.h>
27 #include <linux/scatterlist.h>
28 #include <linux/i2c/tps65010.h>
29 
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <asm/mach-types.h>
33 
34 #include <asm/arch/board.h>
35 #include <asm/arch/mmc.h>
36 #include <asm/arch/gpio.h>
37 #include <asm/arch/dma.h>
38 #include <asm/arch/mux.h>
39 #include <asm/arch/fpga.h>
40 
/* MMC controller register offsets, relative to host->virt_base */
#define	OMAP_MMC_REG_CMD	0x00
#define	OMAP_MMC_REG_ARGL	0x04
#define	OMAP_MMC_REG_ARGH	0x08
#define	OMAP_MMC_REG_CON	0x0c
#define	OMAP_MMC_REG_STAT	0x10
#define	OMAP_MMC_REG_IE		0x14
#define	OMAP_MMC_REG_CTO	0x18
#define	OMAP_MMC_REG_DTO	0x1c
#define	OMAP_MMC_REG_DATA	0x20
#define	OMAP_MMC_REG_BLEN	0x24
#define	OMAP_MMC_REG_NBLK	0x28
#define	OMAP_MMC_REG_BUF	0x2c
#define OMAP_MMC_REG_SDIO	0x34
#define	OMAP_MMC_REG_REV	0x3c
#define	OMAP_MMC_REG_RSP0	0x40
#define	OMAP_MMC_REG_RSP1	0x44
#define	OMAP_MMC_REG_RSP2	0x48
#define	OMAP_MMC_REG_RSP3	0x4c
#define	OMAP_MMC_REG_RSP4	0x50
#define	OMAP_MMC_REG_RSP5	0x54
#define	OMAP_MMC_REG_RSP6	0x58
#define	OMAP_MMC_REG_RSP7	0x5c
#define	OMAP_MMC_REG_IOSR	0x60
#define	OMAP_MMC_REG_SYSC	0x64
#define	OMAP_MMC_REG_SYSS	0x68

/* Bits shared by the STAT (status) and IE (interrupt enable) registers */
#define	OMAP_MMC_STAT_CARD_ERR		(1 << 14)
#define	OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
#define	OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
#define	OMAP_MMC_STAT_A_EMPTY		(1 << 11)
#define	OMAP_MMC_STAT_A_FULL		(1 << 10)
#define	OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
#define	OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
#define	OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
#define	OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
#define	OMAP_MMC_STAT_END_BUSY		(1 <<  4)
#define	OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
#define	OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
#define	OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)

/* Controller registers are 16 bits wide; "reg" is the REG_ suffix above */
#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)

/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC	0
#define OMAP_MMC_CMDTYPE_BCR	1
#define OMAP_MMC_CMDTYPE_AC	2
#define OMAP_MMC_CMDTYPE_ADTC	3


#define DRIVER_NAME "mmci-omap"

/* Specifies how often in millisecs to poll for card status changes
 * when the cover switch is open */
#define OMAP_MMC_COVER_POLL_DELAY	500
98 
struct mmc_omap_host;

/*
 * Per-slot state.  Several slots can share a single controller
 * (struct mmc_omap_host); only one may own the bus at a time.
 */
struct mmc_omap_slot {
	int			id;		/* slot index on the controller */
	unsigned int		vdd;		/* requested voltage; presumably set by set_ios (not in this chunk) */
	u16			saved_con;	/* CON register value restored when switching to this slot */
	u16			bus_mode;	/* MMC_BUSMODE_* (open-drain enables CMD bit 6) */
	unsigned int		fclk_freq;	/* current functional clock rate, in Hz */
	unsigned		powered:1;	/* slot power state; set outside this chunk */

	struct tasklet_struct	cover_tasklet;	/* notifies the MMC core of cover changes */
	struct timer_list       cover_timer;	/* re-polls while the cover is open */
	unsigned		cover_open;	/* last cover state reported via sysfs */

	struct mmc_request      *mrq;		/* request queued while another slot holds the bus */
	struct mmc_omap_host    *host;		/* owning controller */
	struct mmc_host		*mmc;
	struct omap_mmc_slot_data *pdata;	/* board-specific callbacks (cover state, name, ...) */
};
118 
/* Per-controller state, shared by all slots attached to it. */
struct mmc_omap_host {
	int			initialized;
	int			suspended;
	struct mmc_request *	mrq;		/* request currently being processed */
	struct mmc_command *	cmd;		/* command in flight; NULL when idle */
	struct mmc_data *	data;		/* data transfer in flight; NULL when idle */
	struct mmc_host *	mmc;		/* mmc_host of the slot that owns the bus; NULL = free */
	struct device *		dev;
	unsigned char		id; /* 16xx chips have 2 MMC blocks */
	struct clk *		iclk;
	struct clk *		fclk;		/* functional clock, gated by mmc_omap_fclk_enable() */
	struct resource		*mem_res;
	void __iomem		*virt_base;	/* mapped controller registers */
	unsigned int		phys_base;	/* physical register base, used for DMA addresses */
	int			irq;
	unsigned char		bus_mode;
	unsigned char		hw_bus_mode;

	struct work_struct	cmd_abort_work;	/* runs mmc_omap_abort_command() */
	unsigned		abort:1;	/* a command abort has been scheduled */
	struct timer_list	cmd_abort_timer;	/* fires when a command appears stuck */

	struct work_struct      slot_release_work;	/* hands the bus to next_slot */
	struct mmc_omap_slot    *next_slot;	/* slot with a pending request, waiting for the bus */
	struct work_struct      send_stop_work;	/* sends stop_data->stop after an fclk delay */
	struct mmc_data		*stop_data;

	/* PIO transfer bookkeeping */
	unsigned int		sg_len;
	int			sg_idx;		/* index of the sg segment in progress */
	u16 *			buffer;		/* CPU pointer into the current segment */
	u32			buffer_bytes_left;
	u32			total_bytes_left;

	unsigned		use_dma:1;	/* DMA permitted for this host */
	unsigned		brs_received:1, dma_done:1;	/* both needed before a DMA xfer completes */
	unsigned		dma_is_read:1;	/* direction the cached channel is set up for */
	unsigned		dma_in_use:1;
	int			dma_ch;		/* cached DMA channel, -1 when none held */
	spinlock_t		dma_lock;	/* protects brs_received / dma_done */
	struct timer_list	dma_timer;	/* lazily frees the cached DMA channel */
	unsigned		dma_len;	/* byte count of the current DMA segment */

	short			power_pin;

	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
	struct mmc_omap_slot    *current_slot;	/* slot the controller is currently routed to */
	spinlock_t              slot_lock;	/* protects mmc / next_slot / per-slot mrq queueing */
	wait_queue_head_t       slot_wq;	/* waiters for the bus to become free */
	int                     nr_slots;

	struct timer_list       clk_timer;	/* gates fclk off after the bus has idled */
	spinlock_t		clk_lock;     /* for changing enabled state */
	unsigned int            fclk_enabled:1;

	struct omap_mmc_platform_data *pdata;
};
175 
176 void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
177 {
178 	unsigned long tick_ns;
179 
180 	if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
181 		tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
182 		ndelay(8 * tick_ns);
183 	}
184 }
185 
186 void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
187 {
188 	unsigned long flags;
189 
190 	spin_lock_irqsave(&host->clk_lock, flags);
191 	if (host->fclk_enabled != enable) {
192 		host->fclk_enabled = enable;
193 		if (enable)
194 			clk_enable(host->fclk);
195 		else
196 			clk_disable(host->fclk);
197 	}
198 	spin_unlock_irqrestore(&host->clk_lock, flags);
199 }
200 
/*
 * Route the shared controller to @slot.  With @claimed set the caller
 * already owns the bus (host->mmc is ours); otherwise block until the
 * bus is free and claim it under slot_lock.
 */
static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	if (claimed)
		goto no_claim;
	spin_lock_irqsave(&host->slot_lock, flags);
	/* Sleep until no slot owns the controller, then mark it ours. */
	while (host->mmc != NULL) {
		spin_unlock_irqrestore(&host->slot_lock, flags);
		wait_event(host->slot_wq, host->mmc == NULL);
		spin_lock_irqsave(&host->slot_lock, flags);
	}
	host->mmc = slot->mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
no_claim:
	del_timer(&host->clk_timer);
	/* Give the outgoing slot its mandatory 8 fclk cycles first. */
	if (host->current_slot != slot || !claimed)
		mmc_omap_fclk_offdelay(host->current_slot);

	if (host->current_slot != slot) {
		/* Preload CON with only the upper bits of the saved value. */
		OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
		if (host->pdata->switch_slot != NULL)
			host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
		host->current_slot = slot;
	}

	if (claimed) {
		mmc_omap_fclk_enable(host, 1);

		/* Doing the dummy read here seems to work around some bug
		 * at least in OMAP24xx silicon where the command would not
		 * start after writing the CMD register. Sigh. */
		OMAP_MMC_READ(host, CON);

		OMAP_MMC_WRITE(host, CON, slot->saved_con);
	} else
		mmc_omap_fclk_enable(host, 0);
}
240 
241 static void mmc_omap_start_request(struct mmc_omap_host *host,
242 				   struct mmc_request *req);
243 
244 static void mmc_omap_slot_release_work(struct work_struct *work)
245 {
246 	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
247 						  slot_release_work);
248 	struct mmc_omap_slot *next_slot = host->next_slot;
249 	struct mmc_request *rq;
250 
251 	host->next_slot = NULL;
252 	mmc_omap_select_slot(next_slot, 1);
253 
254 	rq = next_slot->mrq;
255 	next_slot->mrq = NULL;
256 	mmc_omap_start_request(host, rq);
257 }
258 
/*
 * Release the controller from @slot.  If another slot has a queued
 * request, hand the bus straight over to it (via slot_release_work);
 * otherwise mark the bus free and wake anyone waiting in
 * mmc_omap_select_slot().  With @clk_enabled set, fclk is kept running
 * briefly so the bus sees enough trailing clock cycles.
 */
static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;
	int i;

	BUG_ON(slot == NULL || host->mmc == NULL);

	if (clk_enabled)
		/* Keeps clock running for at least 8 cycles on valid freq */
		mod_timer(&host->clk_timer, jiffies  + HZ/10);
	else {
		del_timer(&host->clk_timer);
		mmc_omap_fclk_offdelay(slot);
		mmc_omap_fclk_enable(host, 0);
	}

	spin_lock_irqsave(&host->slot_lock, flags);
	/* Check for any pending requests */
	for (i = 0; i < host->nr_slots; i++) {
		struct mmc_omap_slot *new_slot;

		if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
			continue;

		BUG_ON(host->next_slot != NULL);
		new_slot = host->slots[i];
		/* The current slot should not have a request in queue */
		BUG_ON(new_slot == host->current_slot);

		/* Claim the bus for the new slot here; the actual slot
		 * switch and request start happen in process context. */
		host->next_slot = new_slot;
		host->mmc = new_slot->mmc;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		schedule_work(&host->slot_release_work);
		return;
	}

	host->mmc = NULL;
	wake_up(&host->slot_wq);
	spin_unlock_irqrestore(&host->slot_lock, flags);
}
300 
301 static inline
302 int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
303 {
304 	if (slot->pdata->get_cover_state)
305 		return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
306 						    slot->id);
307 	return 0;
308 }
309 
310 static ssize_t
311 mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
312 			   char *buf)
313 {
314 	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
315 	struct mmc_omap_slot *slot = mmc_priv(mmc);
316 
317 	return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
318 		       "closed");
319 }
320 
321 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
322 
/* sysfs "slot_name" show handler: prints the board-supplied slot name. */
static ssize_t
mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct mmc_omap_slot *slot = mmc_priv(mmc);

	return sprintf(buf, "%s\n", slot->pdata->name);
}
332 
333 static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
334 
/*
 * Program the controller registers for @cmd and fire it off.
 * Also arms cmd_abort_timer so a command that never completes is
 * recovered by mmc_omap_abort_command().
 */
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	/* CMD register: opcode in bits 0-5, response type, command type */
	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	/* bit 6: open-drain mode on the CMD line */
	if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	/* bit 11: a busy response (R1b) is expected */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	/* bit 15: data transfer direction is read (card to host) */
	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);

	OMAP_MMC_WRITE(host, CTO, 200);
	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
	OMAP_MMC_WRITE(host, IE,
		       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
		       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
		       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
		       OMAP_MMC_STAT_END_OF_DATA);
	/* Writing CMD starts the command */
	OMAP_MMC_WRITE(host, CMD, cmdreg);
}
401 
402 static void
403 mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
404 		     int abort)
405 {
406 	enum dma_data_direction dma_data_dir;
407 
408 	BUG_ON(host->dma_ch < 0);
409 	if (data->error)
410 		omap_stop_dma(host->dma_ch);
411 	/* Release DMA channel lazily */
412 	mod_timer(&host->dma_timer, jiffies + HZ);
413 	if (data->flags & MMC_DATA_WRITE)
414 		dma_data_dir = DMA_TO_DEVICE;
415 	else
416 		dma_data_dir = DMA_FROM_DEVICE;
417 	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
418 		     dma_data_dir);
419 }
420 
421 static void mmc_omap_send_stop_work(struct work_struct *work)
422 {
423 	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
424 						  send_stop_work);
425 	struct mmc_omap_slot *slot = host->current_slot;
426 	struct mmc_data *data = host->stop_data;
427 	unsigned long tick_ns;
428 
429 	tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
430 	ndelay(8*tick_ns);
431 
432 	mmc_omap_start_command(host, data->stop);
433 }
434 
/*
 * A data transfer has fully finished (both the controller and, when
 * DMA was used, the DMA engine are done).  Complete the request, or
 * schedule the STOP command when one is required.
 */
static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, data->error);

	host->data = NULL;
	host->sg_len = 0;

	/* NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		struct mmc_host *mmc;

		/* Grab mmc before releasing the slot; the release may
		 * hand the controller to another slot. */
		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, data->mrq);
		return;
	}

	/* STOP needed: defer it to process context (needs ndelay). */
	host->stop_data = data;
	schedule_work(&host->send_stop_work);
}
462 
463 static void
464 mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
465 {
466 	struct mmc_omap_slot *slot = host->current_slot;
467 	unsigned int restarts, passes, timeout;
468 	u16 stat = 0;
469 
470 	/* Sending abort takes 80 clocks. Have some extra and round up */
471 	timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
472 	restarts = 0;
473 	while (restarts < maxloops) {
474 		OMAP_MMC_WRITE(host, STAT, 0xFFFF);
475 		OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
476 
477 		passes = 0;
478 		while (passes < timeout) {
479 			stat = OMAP_MMC_READ(host, STAT);
480 			if (stat & OMAP_MMC_STAT_END_OF_CMD)
481 				goto out;
482 			udelay(1);
483 			passes++;
484 		}
485 
486 		restarts++;
487 	}
488 out:
489 	OMAP_MMC_WRITE(host, STAT, stat);
490 }
491 
/*
 * Abort an in-progress data transfer: drop the DMA state (if any) and
 * send the hardware abort sequence.
 */
static void
mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, 1);

	host->data = NULL;
	host->sg_len = 0;

	mmc_omap_send_abort(host, 10000);
}
503 
504 static void
505 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
506 {
507 	unsigned long flags;
508 	int done;
509 
510 	if (!host->dma_in_use) {
511 		mmc_omap_xfer_done(host, data);
512 		return;
513 	}
514 	done = 0;
515 	spin_lock_irqsave(&host->dma_lock, flags);
516 	if (host->dma_done)
517 		done = 1;
518 	else
519 		host->brs_received = 1;
520 	spin_unlock_irqrestore(&host->dma_lock, flags);
521 	if (done)
522 		mmc_omap_xfer_done(host, data);
523 }
524 
/*
 * Timer callback: the lazily cached DMA channel was not reused in
 * time, so actually free it now.
 */
static void
mmc_omap_dma_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	BUG_ON(host->dma_ch < 0);
	omap_free_dma(host->dma_ch);
	host->dma_ch = -1;
}
534 
535 static void
536 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
537 {
538 	unsigned long flags;
539 	int done;
540 
541 	done = 0;
542 	spin_lock_irqsave(&host->dma_lock, flags);
543 	if (host->brs_received)
544 		done = 1;
545 	else
546 		host->dma_done = 1;
547 	spin_unlock_irqrestore(&host->dma_lock, flags);
548 	if (done)
549 		mmc_omap_xfer_done(host, data);
550 }
551 
/*
 * A command has completed (or errored).  Read back the response
 * registers and, when there is no data phase (or the command failed),
 * finish the whole request.
 */
static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	host->cmd = NULL;

	del_timer(&host->cmd_abort_timer);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				OMAP_MMC_READ(host, RSP0) |
				(OMAP_MMC_READ(host, RSP1) << 16);
			cmd->resp[2] =
				OMAP_MMC_READ(host, RSP2) |
				(OMAP_MMC_READ(host, RSP3) << 16);
			cmd->resp[1] =
				OMAP_MMC_READ(host, RSP4) |
				(OMAP_MMC_READ(host, RSP5) << 16);
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		}
	}

	if (host->data == NULL || cmd->error) {
		struct mmc_host *mmc;

		/* On error, abandon any data phase before completing. */
		if (host->data != NULL)
			mmc_omap_abort_xfer(host, host->data);
		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	}
}
593 
/*
 * Abort stuck command. Can occur when card is removed while it is being
 * read.
 *
 * Runs from cmd_abort_work, scheduled by mmc_omap_cmd_timer() or the
 * IRQ handler with host->abort set and the controller IRQ disabled;
 * re-enables the IRQ when done.
 */
static void mmc_omap_abort_command(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  cmd_abort_work);
	BUG_ON(!host->cmd);

	dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
		host->cmd->opcode);

	if (host->cmd->error == 0)
		host->cmd->error = -ETIMEDOUT;

	if (host->data == NULL) {
		struct mmc_command *cmd;
		struct mmc_host    *mmc;

		/* No data phase: send the abort sequence and finish the
		 * request ourselves. */
		cmd = host->cmd;
		host->cmd = NULL;
		mmc_omap_send_abort(host, 10000);

		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	} else
		/* Data phase pending: cmd_done aborts it for us. */
		mmc_omap_cmd_done(host, host->cmd);

	host->abort = 0;
	enable_irq(host->irq);
}
628 
629 static void
630 mmc_omap_cmd_timer(unsigned long data)
631 {
632 	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
633 	unsigned long flags;
634 
635 	spin_lock_irqsave(&host->slot_lock, flags);
636 	if (host->cmd != NULL && !host->abort) {
637 		OMAP_MMC_WRITE(host, IE, 0);
638 		disable_irq(host->irq);
639 		host->abort = 1;
640 		schedule_work(&host->cmd_abort_work);
641 	}
642 	spin_unlock_irqrestore(&host->slot_lock, flags);
643 }
644 
645 /* PIO only */
646 static void
647 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
648 {
649 	struct scatterlist *sg;
650 
651 	sg = host->data->sg + host->sg_idx;
652 	host->buffer_bytes_left = sg->length;
653 	host->buffer = sg_virt(sg);
654 	if (host->buffer_bytes_left > host->total_bytes_left)
655 		host->buffer_bytes_left = host->total_bytes_left;
656 }
657 
/*
 * clk_timer callback: the bus has been idle long enough after a
 * request; gate the functional clock off.
 */
static void
mmc_omap_clk_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	mmc_omap_fclk_enable(host, 0);
}
665 
666 /* PIO only */
667 static void
668 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
669 {
670 	int n;
671 
672 	if (host->buffer_bytes_left == 0) {
673 		host->sg_idx++;
674 		BUG_ON(host->sg_idx == host->sg_len);
675 		mmc_omap_sg_to_buf(host);
676 	}
677 	n = 64;
678 	if (n > host->buffer_bytes_left)
679 		n = host->buffer_bytes_left;
680 	host->buffer_bytes_left -= n;
681 	host->total_bytes_left -= n;
682 	host->data->bytes_xfered += n;
683 
684 	if (write) {
685 		__raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
686 	} else {
687 		__raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
688 	}
689 }
690 
691 static inline void mmc_omap_report_irq(u16 status)
692 {
693 	static const char *mmc_omap_status_bits[] = {
694 		"EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
695 		"CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
696 	};
697 	int i, c = 0;
698 
699 	for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
700 		if (status & (1 << i)) {
701 			if (c)
702 				printk(" ");
703 			printk("%s", mmc_omap_status_bits[i]);
704 			c++;
705 		}
706 }
707 
/*
 * Controller interrupt handler.  Drains STAT in a loop (acknowledging
 * each batch of bits by writing them back), services the PIO FIFO,
 * records command/data completion and error conditions, and finally
 * completes or aborts the request accordingly.
 */
static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
{
	struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
	u16 status;
	int end_command;
	int end_transfer;
	int transfer_error, cmd_error;

	/* Nothing in flight: acknowledge and disable spurious sources. */
	if (host->cmd == NULL && host->data == NULL) {
		status = OMAP_MMC_READ(host, STAT);
		dev_info(mmc_dev(host->slots[0]->mmc),
			 "Spurious IRQ 0x%04x\n", status);
		if (status != 0) {
			OMAP_MMC_WRITE(host, STAT, status);
			OMAP_MMC_WRITE(host, IE, 0);
		}
		return IRQ_HANDLED;
	}

	end_command = 0;
	end_transfer = 0;
	transfer_error = 0;
	cmd_error = 0;

	while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
		int cmd;

		/* Writing the bits back acknowledges them. */
		OMAP_MMC_WRITE(host, STAT, status);
		if (host->cmd != NULL)
			cmd = host->cmd->opcode;
		else
			cmd = -1;
#ifdef CONFIG_MMC_DEBUG
		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
			status, cmd);
		mmc_omap_report_irq(status);
		printk("\n");
#endif
		/* PIO: service the FIFO while bytes remain. */
		if (host->total_bytes_left) {
			if ((status & OMAP_MMC_STAT_A_FULL) ||
			    (status & OMAP_MMC_STAT_END_OF_DATA))
				mmc_omap_xfer_data(host, 0);
			if (status & OMAP_MMC_STAT_A_EMPTY)
				mmc_omap_xfer_data(host, 1);
		}

		if (status & OMAP_MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & OMAP_MMC_STAT_DATA_TOUT) {
			dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
				cmd);
			if (host->data) {
				host->data->error = -ETIMEDOUT;
				transfer_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_DATA_CRC) {
			if (host->data) {
				host->data->error = -EILSEQ;
				dev_dbg(mmc_dev(host->mmc),
					 "data CRC error, bytes left %d\n",
					host->total_bytes_left);
				transfer_error = 1;
			} else {
				dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
			}
		}

		if (status & OMAP_MMC_STAT_CMD_TOUT) {
			/* Timeouts are routine with some commands */
			if (host->cmd) {
				struct mmc_omap_slot *slot =
					host->current_slot;
				/* Only complain when the cover is closed:
				 * an open cover means the card may simply
				 * be absent. */
				if (slot == NULL ||
				    !mmc_omap_cover_is_open(slot))
					dev_err(mmc_dev(host->mmc),
						"command timeout (CMD%d)\n",
						cmd);
				host->cmd->error = -ETIMEDOUT;
				end_command = 1;
				cmd_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_CMD_CRC) {
			if (host->cmd) {
				dev_err(mmc_dev(host->mmc),
					"command CRC error (CMD%d, arg 0x%08x)\n",
					cmd, host->cmd->arg);
				host->cmd->error = -EILSEQ;
				end_command = 1;
				cmd_error = 1;
			} else
				dev_err(mmc_dev(host->mmc),
					"command CRC error without cmd?\n");
		}

		if (status & OMAP_MMC_STAT_CARD_ERR) {
			dev_dbg(mmc_dev(host->mmc),
				"ignoring card status error (CMD%d)\n",
				cmd);
			end_command = 1;
		}

		/*
		 * NOTE: On 1610 the END_OF_CMD may come too early when
		 * starting a write
		 */
		if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
		    (!(status & OMAP_MMC_STAT_A_EMPTY))) {
			end_command = 1;
		}
	}

	/* Command failed with data pending: mask IRQs and defer the
	 * abort to process context. */
	if (cmd_error && host->data) {
		del_timer(&host->cmd_abort_timer);
		host->abort = 1;
		OMAP_MMC_WRITE(host, IE, 0);
		disable_irq(host->irq);
		schedule_work(&host->cmd_abort_work);
		return IRQ_HANDLED;
	}

	if (end_command)
		mmc_omap_cmd_done(host, host->cmd);
	if (host->data != NULL) {
		if (transfer_error)
			mmc_omap_xfer_done(host, host->data);
		else if (end_transfer)
			mmc_omap_end_of_data(host, host->data);
	}

	return IRQ_HANDLED;
}
844 
845 void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
846 {
847 	int cover_open;
848 	struct mmc_omap_host *host = dev_get_drvdata(dev);
849 	struct mmc_omap_slot *slot = host->slots[num];
850 
851 	BUG_ON(num >= host->nr_slots);
852 
853 	/* Other subsystems can call in here before we're initialised. */
854 	if (host->nr_slots == 0 || !host->slots[num])
855 		return;
856 
857 	cover_open = mmc_omap_cover_is_open(slot);
858 	if (cover_open != slot->cover_open) {
859 		slot->cover_open = cover_open;
860 		sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
861 	}
862 
863 	tasklet_hi_schedule(&slot->cover_tasklet);
864 }
865 
/* cover_timer callback: periodically re-run the cover tasklet. */
static void mmc_omap_cover_timer(unsigned long arg)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
	tasklet_schedule(&slot->cover_tasklet);
}
871 
/*
 * Cover tasklet: trigger card detection and, while the cover is open
 * with a card present, keep polling via cover_timer.
 */
static void mmc_omap_cover_handler(unsigned long param)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
	int cover_open = mmc_omap_cover_is_open(slot);

	mmc_detect_change(slot->mmc, 0);
	if (!cover_open)
		return;

	/*
	 * If no card is inserted, we postpone polling until
	 * the cover has been closed.
	 */
	if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
		return;

	mod_timer(&slot->cover_timer,
		  jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
}
891 
/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	/* DMA targets the DATA register by physical address. */
	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
	frame = data->blksz;
	count = sg_dma_len(sg);

	/* The MMC layer may hand us an over-long mapping for a
	 * single-block request; clamp to one block. */
	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;
	frame >>= 1;	/* bytes -> 16-bit words */

	if (!(data->flags & MMC_DATA_WRITE)) {
		/* Card -> memory: BUF value sets the receive threshold;
		 * presumably (frame - 1) words in the high byte. */
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		/* Memory -> card: mirror configuration for the send side. */
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
971 
972 /* A scatterlist segment completed */
973 static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
974 {
975 	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
976 	struct mmc_data *mmcdat = host->data;
977 
978 	if (unlikely(host->dma_ch < 0)) {
979 		dev_err(mmc_dev(host->mmc),
980 			"DMA callback while DMA not enabled\n");
981 		return;
982 	}
983 	/* FIXME: We really should do something to _handle_ the errors */
984 	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
985 		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
986 		return;
987 	}
988 	if (ch_status & OMAP_DMA_DROP_IRQ) {
989 		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
990 		return;
991 	}
992 	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
993 		return;
994 	}
995 	mmcdat->bytes_xfered += host->dma_len;
996 	host->sg_idx++;
997 	if (host->sg_idx < host->sg_len) {
998 		mmc_omap_prepare_dma(host, host->data);
999 		omap_start_dma(host->dma_ch);
1000 	} else
1001 		mmc_omap_dma_done(host, host->data);
1002 }
1003 
1004 static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
1005 {
1006 	const char *dma_dev_name;
1007 	int sync_dev, dma_ch, is_read, r;
1008 
1009 	is_read = !(data->flags & MMC_DATA_WRITE);
1010 	del_timer_sync(&host->dma_timer);
1011 	if (host->dma_ch >= 0) {
1012 		if (is_read == host->dma_is_read)
1013 			return 0;
1014 		omap_free_dma(host->dma_ch);
1015 		host->dma_ch = -1;
1016 	}
1017 
1018 	if (is_read) {
1019 		if (host->id == 1) {
1020 			sync_dev = OMAP_DMA_MMC_RX;
1021 			dma_dev_name = "MMC1 read";
1022 		} else {
1023 			sync_dev = OMAP_DMA_MMC2_RX;
1024 			dma_dev_name = "MMC2 read";
1025 		}
1026 	} else {
1027 		if (host->id == 1) {
1028 			sync_dev = OMAP_DMA_MMC_TX;
1029 			dma_dev_name = "MMC1 write";
1030 		} else {
1031 			sync_dev = OMAP_DMA_MMC2_TX;
1032 			dma_dev_name = "MMC2 write";
1033 		}
1034 	}
1035 	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
1036 			     host, &dma_ch);
1037 	if (r != 0) {
1038 		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
1039 		return r;
1040 	}
1041 	host->dma_ch = dma_ch;
1042 	host->dma_is_read = is_read;
1043 
1044 	return 0;
1045 }
1046 
1047 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1048 {
1049 	u16 reg;
1050 
1051 	reg = OMAP_MMC_READ(host, SDIO);
1052 	reg &= ~(1 << 5);
1053 	OMAP_MMC_WRITE(host, SDIO, reg);
1054 	/* Set maximum timeout */
1055 	OMAP_MMC_WRITE(host, CTO, 0xff);
1056 }
1057 
1058 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1059 {
1060 	unsigned int timeout, cycle_ns;
1061 	u16 reg;
1062 
1063 	cycle_ns = 1000000000 / host->current_slot->fclk_freq;
1064 	timeout = req->data->timeout_ns / cycle_ns;
1065 	timeout += req->data->timeout_clks;
1066 
1067 	/* Check if we need to use timeout multiplier register */
1068 	reg = OMAP_MMC_READ(host, SDIO);
1069 	if (timeout > 0xffff) {
1070 		reg |= (1 << 5);
1071 		timeout /= 1024;
1072 	} else
1073 		reg &= ~(1 << 5);
1074 	OMAP_MMC_WRITE(host, SDIO, reg);
1075 	OMAP_MMC_WRITE(host, DTO, timeout);
1076 }
1077 
/*
 * Set up the controller (and DMA or PIO state) for @req's data phase.
 * With no data, just clear the transfer registers and set the command
 * timeout.  DMA is used only when every segment is a whole number of
 * blocks and a channel can be obtained; otherwise fall back to PIO.
 */
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	/* Hardware counts from 0: program N-1 for both registers. */
	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						sg_len, dma_data_dir);
			/* total_bytes_left == 0 keeps the IRQ handler's
			 * PIO path out of a DMA transfer. */
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}
1147 
/*
 * Start a request on the currently selected slot.  The ordering is
 * significant: the data path is programmed first, then the command is
 * issued, and DMA (if used) is started last.
 */
static void mmc_omap_start_request(struct mmc_omap_host *host,
				   struct mmc_request *req)
{
	/* Only one request may be in flight on the controller */
	BUG_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use)
		omap_start_dma(host->dma_ch);
	/* This path must be entered with interrupts enabled */
	BUG_ON(irqs_disabled());
}
1162 
/*
 * mmc_host_ops .request handler.  The controller is shared between
 * slots: if another slot currently owns it (host->mmc set), the
 * request is parked on this slot (slot->mrq) to be started later when
 * the slot gets selected; otherwise this slot claims the controller
 * and the request starts immediately.
 */
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	spin_lock_irqsave(&host->slot_lock, flags);
	if (host->mmc != NULL) {
		/* Each slot may queue at most one deferred request */
		BUG_ON(slot->mrq != NULL);
		slot->mrq = req;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		return;
	} else
		host->mmc = mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
	mmc_omap_select_slot(slot, 1);
	mmc_omap_start_request(host, req);
}
1181 
1182 static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
1183 				int vdd)
1184 {
1185 	struct mmc_omap_host *host;
1186 
1187 	host = slot->host;
1188 
1189 	if (slot->pdata->set_power != NULL)
1190 		slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
1191 					vdd);
1192 
1193 	if (cpu_is_omap24xx()) {
1194 		u16 w;
1195 
1196 		if (power_on) {
1197 			w = OMAP_MMC_READ(host, CON);
1198 			OMAP_MMC_WRITE(host, CON, w | (1 << 11));
1199 		} else {
1200 			w = OMAP_MMC_READ(host, CON);
1201 			OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
1202 		}
1203 	}
1204 }
1205 
1206 static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
1207 {
1208 	struct mmc_omap_slot *slot = mmc_priv(mmc);
1209 	struct mmc_omap_host *host = slot->host;
1210 	int func_clk_rate = clk_get_rate(host->fclk);
1211 	int dsor;
1212 
1213 	if (ios->clock == 0)
1214 		return 0;
1215 
1216 	dsor = func_clk_rate / ios->clock;
1217 	if (dsor < 1)
1218 		dsor = 1;
1219 
1220 	if (func_clk_rate / dsor > ios->clock)
1221 		dsor++;
1222 
1223 	if (dsor > 250)
1224 		dsor = 250;
1225 
1226 	slot->fclk_freq = func_clk_rate / dsor;
1227 
1228 	if (ios->bus_width == MMC_BUS_WIDTH_4)
1229 		dsor |= 1 << 15;
1230 
1231 	return dsor;
1232 }
1233 
/*
 * mmc_host_ops .set_ios handler: apply the requested clock divisor,
 * bus width, bus mode and power state to the slot.  The slot is
 * claimed for the duration of the register programming and released
 * at the end.
 */
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	int i, dsor;
	int clk_enabled;

	mmc_omap_select_slot(slot, 0);

	dsor = mmc_omap_calc_divisor(mmc, ios);

	if (ios->vdd != slot->vdd)
		slot->vdd = ios->vdd;

	clk_enabled = 0;
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		mmc_omap_set_power(slot, 0, ios->vdd);
		break;
	case MMC_POWER_UP:
		/* Cannot touch dsor yet, just power up MMC */
		mmc_omap_set_power(slot, 1, ios->vdd);
		goto exit;
	case MMC_POWER_ON:
		mmc_omap_fclk_enable(host, 1);
		clk_enabled = 1;
		/* bit 11 of CON: the POW bit referenced below — TODO confirm vs TRM */
		dsor |= 1 << 11;
		break;
	}

	if (slot->bus_mode != ios->bus_mode) {
		if (slot->pdata->set_bus_mode != NULL)
			slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
						  ios->bus_mode);
		slot->bus_mode = ios->bus_mode;
	}

	/* On insanely high arm_per frequencies something sometimes
	 * goes somehow out of sync, and the POW bit is not being set,
	 * which results in the while loop below getting stuck.
	 * Writing to the CON register twice seems to do the trick. */
	for (i = 0; i < 2; i++)
		OMAP_MMC_WRITE(host, CON, dsor);
	slot->saved_con = dsor;
	if (ios->power_mode == MMC_POWER_ON) {
		/* worst case at 400kHz, 80 cycles makes 200 microsecs */
		int usecs = 250;

		/* Send clock cycles, poll completion */
		OMAP_MMC_WRITE(host, IE, 0);
		OMAP_MMC_WRITE(host, STAT, 0xffff);
		OMAP_MMC_WRITE(host, CMD, 1 << 7);
		while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
			udelay(1);
			usecs--;
		}
		/* Acknowledge the completion status bit */
		OMAP_MMC_WRITE(host, STAT, 1);
	}

exit:
	mmc_omap_release_slot(slot, clk_enabled);
}
1296 
/* Host controller operations exposed to the MMC core */
static const struct mmc_host_ops mmc_omap_ops = {
	.request	= mmc_omap_request,
	.set_ios	= mmc_omap_set_ios,
};
1301 
1302 static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1303 {
1304 	struct mmc_omap_slot *slot = NULL;
1305 	struct mmc_host *mmc;
1306 	int r;
1307 
1308 	mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
1309 	if (mmc == NULL)
1310 		return -ENOMEM;
1311 
1312 	slot = mmc_priv(mmc);
1313 	slot->host = host;
1314 	slot->mmc = mmc;
1315 	slot->id = id;
1316 	slot->pdata = &host->pdata->slots[id];
1317 
1318 	host->slots[id] = slot;
1319 
1320 	mmc->caps = MMC_CAP_MULTIWRITE;
1321 	if (host->pdata->conf.wire4)
1322 		mmc->caps |= MMC_CAP_4_BIT_DATA;
1323 
1324 	mmc->ops = &mmc_omap_ops;
1325 	mmc->f_min = 400000;
1326 
1327 	if (cpu_class_is_omap2())
1328 		mmc->f_max = 48000000;
1329 	else
1330 		mmc->f_max = 24000000;
1331 	if (host->pdata->max_freq)
1332 		mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
1333 	mmc->ocr_avail = slot->pdata->ocr_mask;
1334 
1335 	/* Use scatterlist DMA to reduce per-transfer costs.
1336 	 * NOTE max_seg_size assumption that small blocks aren't
1337 	 * normally used (except e.g. for reading SD registers).
1338 	 */
1339 	mmc->max_phys_segs = 32;
1340 	mmc->max_hw_segs = 32;
1341 	mmc->max_blk_size = 2048;	/* BLEN is 11 bits (+1) */
1342 	mmc->max_blk_count = 2048;	/* NBLK is 11 bits (+1) */
1343 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1344 	mmc->max_seg_size = mmc->max_req_size;
1345 
1346 	r = mmc_add_host(mmc);
1347 	if (r < 0)
1348 		goto err_remove_host;
1349 
1350 	if (slot->pdata->name != NULL) {
1351 		r = device_create_file(&mmc->class_dev,
1352 					&dev_attr_slot_name);
1353 		if (r < 0)
1354 			goto err_remove_host;
1355 	}
1356 
1357 	if (slot->pdata->get_cover_state != NULL) {
1358 		r = device_create_file(&mmc->class_dev,
1359 					&dev_attr_cover_switch);
1360 		if (r < 0)
1361 			goto err_remove_slot_name;
1362 
1363 		setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
1364 			    (unsigned long)slot);
1365 		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
1366 			     (unsigned long)slot);
1367 		tasklet_schedule(&slot->cover_tasklet);
1368 	}
1369 
1370 	return 0;
1371 
1372 err_remove_slot_name:
1373 	if (slot->pdata->name != NULL)
1374 		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1375 err_remove_host:
1376 	mmc_remove_host(mmc);
1377 	mmc_free_host(mmc);
1378 	return r;
1379 }
1380 
/*
 * Unregister one slot: remove its sysfs attributes, stop the cover
 * debounce timer/tasklet and pending work, then tear down the
 * mmc_host.
 */
static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;

	if (slot->pdata->name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
	if (slot->pdata->get_cover_state != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);

	/* NOTE(review): if cover_timer re-schedules cover_tasklet, killing
	 * the tasklet before del_timer_sync() could let a late timer fire
	 * re-schedule it after the kill — verify against the handlers. */
	tasklet_kill(&slot->cover_tasklet);
	del_timer_sync(&slot->cover_timer);
	flush_scheduled_work();

	mmc_remove_host(mmc);
	mmc_free_host(mmc);
}
1397 
1398 static int __init mmc_omap_probe(struct platform_device *pdev)
1399 {
1400 	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1401 	struct mmc_omap_host *host = NULL;
1402 	struct resource *res;
1403 	int i, ret = 0;
1404 	int irq;
1405 
1406 	if (pdata == NULL) {
1407 		dev_err(&pdev->dev, "platform data missing\n");
1408 		return -ENXIO;
1409 	}
1410 	if (pdata->nr_slots == 0) {
1411 		dev_err(&pdev->dev, "no slots\n");
1412 		return -ENXIO;
1413 	}
1414 
1415 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1416 	irq = platform_get_irq(pdev, 0);
1417 	if (res == NULL || irq < 0)
1418 		return -ENXIO;
1419 
1420 	res = request_mem_region(res->start, res->end - res->start + 1,
1421 				 pdev->name);
1422 	if (res == NULL)
1423 		return -EBUSY;
1424 
1425 	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
1426 	if (host == NULL) {
1427 		ret = -ENOMEM;
1428 		goto err_free_mem_region;
1429 	}
1430 
1431 	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
1432 	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
1433 
1434 	INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
1435 	setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
1436 		    (unsigned long) host);
1437 
1438 	spin_lock_init(&host->clk_lock);
1439 	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
1440 
1441 	spin_lock_init(&host->dma_lock);
1442 	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
1443 	spin_lock_init(&host->slot_lock);
1444 	init_waitqueue_head(&host->slot_wq);
1445 
1446 	host->pdata = pdata;
1447 	host->dev = &pdev->dev;
1448 	platform_set_drvdata(pdev, host);
1449 
1450 	host->id = pdev->id;
1451 	host->mem_res = res;
1452 	host->irq = irq;
1453 
1454 	host->use_dma = 1;
1455 	host->dma_ch = -1;
1456 
1457 	host->irq = irq;
1458 	host->phys_base = host->mem_res->start;
1459 	host->virt_base = (void __iomem *) IO_ADDRESS(host->phys_base);
1460 
1461 	if (cpu_is_omap24xx()) {
1462 		host->iclk = clk_get(&pdev->dev, "mmc_ick");
1463 		if (IS_ERR(host->iclk))
1464 			goto err_free_mmc_host;
1465 		clk_enable(host->iclk);
1466 	}
1467 
1468 	if (!cpu_is_omap24xx())
1469 		host->fclk = clk_get(&pdev->dev, "mmc_ck");
1470 	else
1471 		host->fclk = clk_get(&pdev->dev, "mmc_fck");
1472 
1473 	if (IS_ERR(host->fclk)) {
1474 		ret = PTR_ERR(host->fclk);
1475 		goto err_free_iclk;
1476 	}
1477 
1478 	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1479 	if (ret)
1480 		goto err_free_fclk;
1481 
1482 	if (pdata->init != NULL) {
1483 		ret = pdata->init(&pdev->dev);
1484 		if (ret < 0)
1485 			goto err_free_irq;
1486 	}
1487 
1488 	host->nr_slots = pdata->nr_slots;
1489 	for (i = 0; i < pdata->nr_slots; i++) {
1490 		ret = mmc_omap_new_slot(host, i);
1491 		if (ret < 0) {
1492 			while (--i >= 0)
1493 				mmc_omap_remove_slot(host->slots[i]);
1494 
1495 			goto err_plat_cleanup;
1496 		}
1497 	}
1498 
1499 	return 0;
1500 
1501 err_plat_cleanup:
1502 	if (pdata->cleanup)
1503 		pdata->cleanup(&pdev->dev);
1504 err_free_irq:
1505 	free_irq(host->irq, host);
1506 err_free_fclk:
1507 	clk_put(host->fclk);
1508 err_free_iclk:
1509 	if (host->iclk != NULL) {
1510 		clk_disable(host->iclk);
1511 		clk_put(host->iclk);
1512 	}
1513 err_free_mmc_host:
1514 	kfree(host);
1515 err_free_mem_region:
1516 	release_mem_region(res->start, res->end - res->start + 1);
1517 	return ret;
1518 }
1519 
1520 static int mmc_omap_remove(struct platform_device *pdev)
1521 {
1522 	struct mmc_omap_host *host = platform_get_drvdata(pdev);
1523 	int i;
1524 
1525 	platform_set_drvdata(pdev, NULL);
1526 
1527 	BUG_ON(host == NULL);
1528 
1529 	for (i = 0; i < host->nr_slots; i++)
1530 		mmc_omap_remove_slot(host->slots[i]);
1531 
1532 	if (host->pdata->cleanup)
1533 		host->pdata->cleanup(&pdev->dev);
1534 
1535 	if (host->iclk && !IS_ERR(host->iclk))
1536 		clk_put(host->iclk);
1537 	if (host->fclk && !IS_ERR(host->fclk))
1538 		clk_put(host->fclk);
1539 
1540 	release_mem_region(pdev->resource[0].start,
1541 			   pdev->resource[0].end - pdev->resource[0].start + 1);
1542 
1543 	kfree(host);
1544 
1545 	return 0;
1546 }
1547 
1548 #ifdef CONFIG_PM
1549 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1550 {
1551 	int i, ret = 0;
1552 	struct mmc_omap_host *host = platform_get_drvdata(pdev);
1553 
1554 	if (host == NULL || host->suspended)
1555 		return 0;
1556 
1557 	for (i = 0; i < host->nr_slots; i++) {
1558 		struct mmc_omap_slot *slot;
1559 
1560 		slot = host->slots[i];
1561 		ret = mmc_suspend_host(slot->mmc, mesg);
1562 		if (ret < 0) {
1563 			while (--i >= 0) {
1564 				slot = host->slots[i];
1565 				mmc_resume_host(slot->mmc);
1566 			}
1567 			return ret;
1568 		}
1569 	}
1570 	host->suspended = 1;
1571 	return 0;
1572 }
1573 
1574 static int mmc_omap_resume(struct platform_device *pdev)
1575 {
1576 	int i, ret = 0;
1577 	struct mmc_omap_host *host = platform_get_drvdata(pdev);
1578 
1579 	if (host == NULL || !host->suspended)
1580 		return 0;
1581 
1582 	for (i = 0; i < host->nr_slots; i++) {
1583 		struct mmc_omap_slot *slot;
1584 		slot = host->slots[i];
1585 		ret = mmc_resume_host(slot->mmc);
1586 		if (ret < 0)
1587 			return ret;
1588 
1589 		host->suspended = 0;
1590 	}
1591 	return 0;
1592 }
1593 #else
1594 #define mmc_omap_suspend	NULL
1595 #define mmc_omap_resume		NULL
1596 #endif
1597 
/* Platform driver glue binding probe/remove/PM callbacks to DRIVER_NAME */
static struct platform_driver mmc_omap_driver = {
	.probe		= mmc_omap_probe,
	.remove		= mmc_omap_remove,
	.suspend	= mmc_omap_suspend,
	.resume		= mmc_omap_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1608 
/* Module entry point: register the platform driver */
static int __init mmc_omap_init(void)
{
	return platform_driver_register(&mmc_omap_driver);
}
1613 
/* Module exit point: unregister the platform driver */
static void __exit mmc_omap_exit(void)
{
	platform_driver_unregister(&mmc_omap_driver);
}
1618 
1619 module_init(mmc_omap_init);
1620 module_exit(mmc_omap_exit);
1621 
1622 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1623 MODULE_LICENSE("GPL");
1624 MODULE_ALIAS("platform:" DRIVER_NAME);
1625 MODULE_AUTHOR("Juha Yrj�l�");
1626