xref: /openbmc/linux/drivers/mmc/host/sdhci.c (revision 82ced6fd)
1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * Thanks to the following companies for their support:
12  *
13  *     - JMicron (hardware and technical support)
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/highmem.h>
18 #include <linux/io.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/scatterlist.h>
21 
22 #include <linux/leds.h>
23 
24 #include <linux/mmc/host.h>
25 
26 #include "sdhci.h"
27 
28 #define DRIVER_NAME "sdhci"
29 
30 #define DBG(f, x...) \
31 	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
32 
33 #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
34 	defined(CONFIG_MMC_SDHCI_MODULE))
35 #define SDHCI_USE_LEDS_CLASS
36 #endif
37 
38 static unsigned int debug_quirks = 0;
39 
40 static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
41 static void sdhci_finish_data(struct sdhci_host *);
42 
43 static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
44 static void sdhci_finish_command(struct sdhci_host *);
45 
/*
 * Dump the controller's register file to the kernel log at KERN_DEBUG
 * level.  Called from error paths (reset/clock/inhibit-bit timeouts) to
 * aid post-mortem debugging.
 */
static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
		sdhci_readl(host, SDHCI_DMA_ADDRESS),
		sdhci_readw(host, SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
		sdhci_readw(host, SDHCI_BLOCK_SIZE),
		sdhci_readw(host, SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		sdhci_readl(host, SDHCI_ARGUMENT),
		sdhci_readw(host, SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
		sdhci_readl(host, SDHCI_PRESENT_STATE),
		sdhci_readb(host, SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
		sdhci_readb(host, SDHCI_POWER_CONTROL),
		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		sdhci_readl(host, SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		sdhci_readl(host, SDHCI_INT_ENABLE),
		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		sdhci_readw(host, SDHCI_ACMD12_ERR),
		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
		sdhci_readl(host, SDHCI_CAPABILITIES),
		sdhci_readl(host, SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}
83 
84 /*****************************************************************************\
85  *                                                                           *
86  * Low level functions                                                       *
87  *                                                                           *
88 \*****************************************************************************/
89 
90 static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
91 {
92 	u32 ier;
93 
94 	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
95 	ier &= ~clear;
96 	ier |= set;
97 	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
98 	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
99 }
100 
/* Enable (unmask) the given interrupt bits without disturbing others. */
static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, 0, irqs);
}
105 
/* Disable (mask) the given interrupt bits without disturbing others. */
static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
{
	sdhci_clear_set_irqs(host, irqs, 0);
}
110 
111 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
112 {
113 	u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
114 
115 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
116 		return;
117 
118 	if (enable)
119 		sdhci_unmask_irqs(host, irqs);
120 	else
121 		sdhci_mask_irqs(host, irqs);
122 }
123 
/* Convenience wrapper: turn card-detect interrupts on. */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}
128 
/* Convenience wrapper: turn card-detect interrupts off. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
133 
134 static void sdhci_reset(struct sdhci_host *host, u8 mask)
135 {
136 	unsigned long timeout;
137 	u32 uninitialized_var(ier);
138 
139 	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
140 		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
141 			SDHCI_CARD_PRESENT))
142 			return;
143 	}
144 
145 	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
146 		ier = sdhci_readl(host, SDHCI_INT_ENABLE);
147 
148 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
149 
150 	if (mask & SDHCI_RESET_ALL)
151 		host->clock = 0;
152 
153 	/* Wait max 100 ms */
154 	timeout = 100;
155 
156 	/* hw clears the bit when it's done */
157 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
158 		if (timeout == 0) {
159 			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
160 				mmc_hostname(host->mmc), (int)mask);
161 			sdhci_dumpregs(host);
162 			return;
163 		}
164 		timeout--;
165 		mdelay(1);
166 	}
167 
168 	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
169 		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
170 }
171 
/*
 * Bring the controller to a known state: full software reset, then
 * enable the basic command/data completion and error interrupts.
 * Card-detect interrupts are NOT enabled here — see sdhci_reinit().
 */
static void sdhci_init(struct sdhci_host *host)
{
	sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
}
182 
/* Full re-initialization: base init plus card-detect interrupts. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host);
	sdhci_enable_card_detection(host);
}
188 
189 static void sdhci_activate_led(struct sdhci_host *host)
190 {
191 	u8 ctrl;
192 
193 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
194 	ctrl |= SDHCI_CTRL_LED;
195 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
196 }
197 
198 static void sdhci_deactivate_led(struct sdhci_host *host)
199 {
200 	u8 ctrl;
201 
202 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
203 	ctrl &= ~SDHCI_CTRL_LED;
204 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
205 }
206 
#ifdef SDHCI_USE_LEDS_CLASS
/*
 * LED class-device brightness callback: any non-off brightness turns
 * the activity LED on.  Takes host->lock because the LED bit lives in
 * the shared host control register.
 */
static void sdhci_led_control(struct led_classdev *led,
	enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (brightness == LED_OFF)
		sdhci_deactivate_led(host);
	else
		sdhci_activate_led(host);

	spin_unlock_irqrestore(&host->lock, flags);
}
#endif
224 
225 /*****************************************************************************\
226  *                                                                           *
227  * Core functions                                                            *
228  *                                                                           *
229 \*****************************************************************************/
230 
231 static void sdhci_read_block_pio(struct sdhci_host *host)
232 {
233 	unsigned long flags;
234 	size_t blksize, len, chunk;
235 	u32 uninitialized_var(scratch);
236 	u8 *buf;
237 
238 	DBG("PIO reading\n");
239 
240 	blksize = host->data->blksz;
241 	chunk = 0;
242 
243 	local_irq_save(flags);
244 
245 	while (blksize) {
246 		if (!sg_miter_next(&host->sg_miter))
247 			BUG();
248 
249 		len = min(host->sg_miter.length, blksize);
250 
251 		blksize -= len;
252 		host->sg_miter.consumed = len;
253 
254 		buf = host->sg_miter.addr;
255 
256 		while (len) {
257 			if (chunk == 0) {
258 				scratch = sdhci_readl(host, SDHCI_BUFFER);
259 				chunk = 4;
260 			}
261 
262 			*buf = scratch & 0xFF;
263 
264 			buf++;
265 			scratch >>= 8;
266 			chunk--;
267 			len--;
268 		}
269 	}
270 
271 	sg_miter_stop(&host->sg_miter);
272 
273 	local_irq_restore(flags);
274 }
275 
/*
 * Write one block to the controller's 32-bit data port (PIO mode) from
 * the scatterlist position tracked by host->sg_miter.  Bytes are packed
 * little-endian into a scratch word which is flushed every 4 bytes —
 * and also at the very end of the block, so a blksz that is not a
 * multiple of 4 still gets its tail written out.  Runs with local
 * interrupts off (SG_MITER_ATOMIC mapping).
 */
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;	/* bytes accumulated in scratch so far */
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		if (!sg_miter_next(&host->sg_miter))
			BUG();

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			/* Flush on a full word, or on the final partial one. */
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
321 
/*
 * Pump as many blocks as the controller currently has buffer space/data
 * for, in the direction of the active request.  Polls the present-state
 * register's data/space-available bit and transfers one block per
 * iteration until either the FIFO stalls or host->blocks reaches zero.
 * Called from interrupt context with host->lock held.
 */
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
361 
/*
 * Atomically map a scatterlist entry's page and return a pointer to its
 * data (page mapping plus sg offset).  Disables local interrupts via
 * *@flags; pair with sdhci_kunmap_atomic().
 */
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
367 
/*
 * Undo sdhci_kmap_atomic(): drop the atomic mapping and restore the
 * interrupt state saved in *@flags.
 */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
373 
/*
 * Build the ADMA2 descriptor table for @data and DMA-map everything.
 *
 * Each mapped scatterlist entry becomes one 8-byte descriptor (32-bit
 * address, 16-bit length, "tran/valid" attributes), written byte-wise.
 * The SDHCI spec requires 32-bit-aligned data addresses, so up to three
 * misaligned leading bytes of a segment are routed through a slot in
 * host->align_buffer instead (pre-filled here for writes; copied back
 * in sdhci_adma_table_post() for reads).  The table ends with a
 * nop/end/valid terminator descriptor.
 *
 * Returns 0 on success or -EINVAL if any DMA mapping fails; on failure
 * all mappings made so far are unwound via the goto chain.
 */
static int sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	u8 *desc;
	u8 *align;
	dma_addr_t addr;
	dma_addr_t align_addr;
	int len, offset;

	struct scatterlist *sg;
	int i;
	char *buffer;
	unsigned long flags;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	/*
	 * The ADMA descriptor table is mapped further down as we
	 * need to fill it with data first.
	 */

	host->align_addr = dma_map_single(mmc_dev(host->mmc),
		host->align_buffer, 128 * 4, direction);
	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
		goto fail;
	BUG_ON(host->align_addr & 0x3);

	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
		data->sg, data->sg_len, direction);
	if (host->sg_count == 0)
		goto unmap_align;

	desc = host->adma_desc;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA
		 * addresses must be 32-bit aligned. If they
		 * aren't, then we use a bounce buffer for
		 * the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (4 - (addr & 0x3)) & 0x3;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				/* Pre-stage the misaligned head bytes. */
				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* Descriptor pointing at the bounce slot. */
			desc[7] = (align_addr >> 24) & 0xff;
			desc[6] = (align_addr >> 16) & 0xff;
			desc[5] = (align_addr >> 8) & 0xff;
			desc[4] = (align_addr >> 0) & 0xff;

			BUG_ON(offset > 65536);

			desc[3] = (offset >> 8) & 0xff;
			desc[2] = (offset >> 0) & 0xff;

			desc[1] = 0x00;
			desc[0] = 0x21; /* tran, valid */

			align += 4;
			align_addr += 4;

			desc += 8;

			addr += offset;
			len -= offset;
		}

		/* Descriptor for the (now aligned) bulk of the segment. */
		desc[7] = (addr >> 24) & 0xff;
		desc[6] = (addr >> 16) & 0xff;
		desc[5] = (addr >> 8) & 0xff;
		desc[4] = (addr >> 0) & 0xff;

		BUG_ON(len > 65536);

		desc[3] = (len >> 8) & 0xff;
		desc[2] = (len >> 0) & 0xff;

		desc[1] = 0x00;
		desc[0] = 0x21; /* tran, valid */

		desc += 8;

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
	}

	/*
	 * Add a terminating entry.
	 */
	desc[7] = 0;
	desc[6] = 0;
	desc[5] = 0;
	desc[4] = 0;

	desc[3] = 0;
	desc[2] = 0;

	desc[1] = 0x00;
	desc[0] = 0x03; /* nop, end, valid */

	/*
	 * Resync align buffer as we might have changed it.
	 */
	if (data->flags & MMC_DATA_WRITE) {
		dma_sync_single_for_device(mmc_dev(host->mmc),
			host->align_addr, 128 * 4, direction);
	}

	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
		goto unmap_entries;
	BUG_ON(host->adma_addr & 0x3);

	return 0;

unmap_entries:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
unmap_align:
	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);
fail:
	return -EINVAL;
}
524 
/*
 * Tear down the ADMA mappings created by sdhci_adma_table_pre().  For
 * reads, any segment whose start address was misaligned had its first
 * 1-3 bytes landed in host->align_buffer, so copy those back into the
 * scatterlist pages before the sg mapping is released.
 */
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	int direction;

	struct scatterlist *sg;
	int i, size;
	u8 *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
		(128 * 2 + 1) * 4, DMA_TO_DEVICE);

	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
		128 * 4, direction);

	if (data->flags & MMC_DATA_READ) {
		/* Make device-written data visible to the CPU first. */
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
			data->sg_len, direction);

		align = host->align_buffer;

		for_each_sg(data->sg, sg, host->sg_count, i) {
			if (sg_dma_address(sg) & 0x3) {
				size = 4 - (sg_dma_address(sg) & 0x3);

				buffer = sdhci_kmap_atomic(sg, &flags);
				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
				memcpy(buffer, align, size);
				sdhci_kunmap_atomic(buffer, &flags);

				/* One 4-byte bounce slot per misaligned sg. */
				align += 4;
			}
		}
	}

	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
		data->sg_len, direction);
}
570 
/*
 * Compute the 4-bit value for the timeout control register: the
 * smallest count such that timeout_clk * 2^(13 + count) covers the
 * request's timeout (given in ns plus clock ticks).  Returns 0xE (the
 * maximum programmable timeout) when the quirk says the advertised
 * timeout clock is unreliable, or when the request would need more
 * than the register can express.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
		return 0xE;

	/* timeout in us */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;
	}

	return count;
}
616 
617 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
618 {
619 	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
620 	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
621 
622 	if (host->flags & SDHCI_REQ_USE_DMA)
623 		sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
624 	else
625 		sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
626 }
627 
/*
 * Program the controller for the data phase of a request: timeout,
 * transfer method (SDMA / ADMA / PIO), DMA mappings or sg_miter setup,
 * transfer interrupts, and block size/count registers.  @data may be
 * NULL for data-less commands.  Called with host->lock held from
 * sdhci_send_command().
 *
 * Several quirks can force a fallback from DMA to PIO: segment lengths
 * or offsets that are not 32-bit multiples on controllers that require
 * that alignment.
 */
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	u8 ctrl;
	int ret;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	count = sdhci_calc_timeout(host, data);
	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);

	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	/*
	 * FIXME: This doesn't account for merging when mapping the
	 * scatterlist.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Check segment *lengths* against the size quirks. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & 0x3) {
					DBG("Reverting to PIO because of "
						"transfer size (%d)\n",
						sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		int broken, i;
		struct scatterlist *sg;

		/* Check segment *offsets* against the address quirks. */
		broken = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * As we use 3 byte chunks to work around
			 * alignment problems, we need to check this
			 * quirk.
			 */
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
				broken = 1;
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				broken = 1;
		}

		if (unlikely(broken)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->offset & 0x3) {
					DBG("Reverting to PIO because of "
						"bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA) {
			ret = sdhci_adma_table_pre(host, data);
			if (ret) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				sdhci_writel(host, host->adma_addr,
					SDHCI_ADMA_ADDRESS);
			}
		} else {
			int sg_cnt;

			/* SDMA: map the (single-entry) scatterlist. */
			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_READ) ?
						DMA_FROM_DEVICE :
						DMA_TO_DEVICE);
			if (sg_cnt == 0) {
				/*
				 * This only happens when someone fed
				 * us an invalid request.
				 */
				WARN_ON(1);
				host->flags &= ~SDHCI_REQ_USE_DMA;
			} else {
				WARN_ON(sg_cnt != 1);
				sdhci_writel(host, sg_dma_address(data->sg),
					SDHCI_DMA_ADDRESS);
			}
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA))
			ctrl |= SDHCI_CTRL_ADMA32;
		else
			ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		/* PIO path: iterate the sg list from interrupt context. */
		sg_miter_start(&host->sg_miter,
			data->sg, data->sg_len, SG_MITER_ATOMIC);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
782 
783 static void sdhci_set_transfer_mode(struct sdhci_host *host,
784 	struct mmc_data *data)
785 {
786 	u16 mode;
787 
788 	if (data == NULL)
789 		return;
790 
791 	WARN_ON(!host->data);
792 
793 	mode = SDHCI_TRNS_BLK_CNT_EN;
794 	if (data->blocks > 1)
795 		mode |= SDHCI_TRNS_MULTI;
796 	if (data->flags & MMC_DATA_READ)
797 		mode |= SDHCI_TRNS_READ;
798 	if (host->flags & SDHCI_REQ_USE_DMA)
799 		mode |= SDHCI_TRNS_DMA;
800 
801 	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
802 }
803 
/*
 * Complete the data phase of the current request: release DMA
 * resources, account transferred bytes, and either issue the stop
 * command (resetting CMD/DATA state machines first on error) or
 * schedule the finish tasklet.  Called with host->lock held.
 */
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		if (host->flags & SDHCI_USE_ADMA)
			sdhci_adma_table_post(host, data);
		else {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				data->sg_len, (data->flags & MMC_DATA_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}
	}

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}
849 
/*
 * Issue @cmd to the controller: wait for the CMD (and, where relevant,
 * DAT) inhibit bits to clear, arm the 10s software watchdog timer,
 * prepare any data phase, then write argument, transfer mode and
 * finally the command register (which triggers execution).  Errors
 * (inhibit timeout, unsupported response type) complete the request via
 * the finish tasklet.  Called with host->lock held.
 */
static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inihibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	/* Software watchdog: sdhci_timeout_timer fires if nothing completes. */
	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	/* The hardware cannot deliver a 136-bit response with busy signal. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	/* Writing the command register starts command execution. */
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
919 
/*
 * Handle command completion: collect the response registers into
 * cmd->resp[], finish an already-completed data phase if the data-end
 * interrupt arrived before the response, and schedule the finish
 * tasklet for data-less commands.  Called from interrupt context with
 * host->lock held.
 */
static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0;i < 4;i++) {
				host->cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	/* Data phase already finished early (data-end before response). */
	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}
952 
/*
 * Set the SD clock to at most @clock Hz (0 = clock off).  Uses the
 * SDHCI power-of-two divider: the smallest divider whose result does
 * not exceed the requested frequency.  The internal clock is enabled
 * first and polled (up to 10 ms) for stability before the card clock
 * is gated on.  Platform ops->set_clock may take over completely via
 * SDHCI_QUIRK_NONSTANDARD_CLOCK.
 */
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	if (host->ops->set_clock) {
		host->ops->set_clock(host, clock);
		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
			return;
	}

	/* Stop the clock before reprogramming the divider. */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	for (div = 1;div < 256;div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	/* The register encodes divider/2 in the divider field. */
	div >>= 1;

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}
1003 
/*
 * Set the bus power.  @power is an MMC_VDD_* bit *number* (the value is
 * used as "1 << power"), or (unsigned short)-1 to switch power off.
 * Only the 1.8V/3.0V/3.3V ranges the SDHCI power register can express
 * are accepted; anything else is a caller bug (BUG()).
 */
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;

	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		BUG();
	}

	/*
	 * At least the Marvell CaFe chip gets confused if we set the voltage
	 * and set turn on power at the same time, so set the voltage first.
	 */
	if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
		sdhci_writeb(host, pwr & ~SDHCI_POWER_ON, SDHCI_POWER_CONTROL);

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

out:
	host->power = power;
}
1053 
1054 /*****************************************************************************\
1055  *                                                                           *
1056  * MMC callbacks                                                             *
1057  *                                                                           *
1058 \*****************************************************************************/
1059 
/*
 * mmc_host_ops.request callback: start a new request.  If no card is
 * present (or the device is marked dead) the request is failed with
 * -ENOMEDIUM via the finish tasklet; otherwise the first command is
 * issued.  Hosts with broken card detect are assumed to always have a
 * card, since the core polls them separately.
 */
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	bool present;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_activate_led(host);
#endif

	host->mrq = mrq;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		present = true;
	else
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				SDHCI_CARD_PRESENT;

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1094 
/*
 * mmc_host_ops.set_ios callback: apply clock, power, bus width and
 * timing settings requested by the MMC core.  A power-off triggers a
 * full reinit to clear any odd controller state; the ENE reset quirk
 * additionally resets CMD/DATA on every ios change.
 */
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

out:
	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
1150 
1151 static int sdhci_get_ro(struct mmc_host *mmc)
1152 {
1153 	struct sdhci_host *host;
1154 	unsigned long flags;
1155 	int present;
1156 
1157 	host = mmc_priv(mmc);
1158 
1159 	spin_lock_irqsave(&host->lock, flags);
1160 
1161 	if (host->flags & SDHCI_DEVICE_DEAD)
1162 		present = 0;
1163 	else
1164 		present = sdhci_readl(host, SDHCI_PRESENT_STATE);
1165 
1166 	spin_unlock_irqrestore(&host->lock, flags);
1167 
1168 	if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)
1169 		return !!(present & SDHCI_WRITE_PROTECT);
1170 	return !(present & SDHCI_WRITE_PROTECT);
1171 }
1172 
1173 static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1174 {
1175 	struct sdhci_host *host;
1176 	unsigned long flags;
1177 
1178 	host = mmc_priv(mmc);
1179 
1180 	spin_lock_irqsave(&host->lock, flags);
1181 
1182 	if (host->flags & SDHCI_DEVICE_DEAD)
1183 		goto out;
1184 
1185 	if (enable)
1186 		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1187 	else
1188 		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1189 out:
1190 	mmiowb();
1191 
1192 	spin_unlock_irqrestore(&host->lock, flags);
1193 }
1194 
/* Host controller operations handed to the MMC core at registration. */
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};
1201 
1202 /*****************************************************************************\
1203  *                                                                           *
1204  * Tasklets                                                                  *
1205  *                                                                           *
1206 \*****************************************************************************/
1207 
/*
 * Card-detect tasklet, scheduled from the interrupt handler on
 * insert/remove events. If the card disappeared while a request was
 * in flight, the CMD and DATA state machines are reset and the
 * request is failed with -ENOMEDIUM; the MMC core is then asked to
 * rescan the slot.
 */
static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	/* Debounce before the core rescans the slot. */
	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
1236 
/*
 * Request-completion tasklet. Cancels the software timeout timer,
 * resets the controller state machines when the request ended in an
 * error (or when a quirk demands it), clears the per-request state
 * and hands the finished request back to the MMC core after the lock
 * has been dropped.
 */
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host*)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
		(mrq->cmd->error ||
		 (mrq->data && (mrq->data->error ||
		  (mrq->data->stop && mrq->data->stop->error))) ||
		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

#ifndef SDHCI_USE_LEDS_CLASS
	sdhci_deactivate_led(host);
#endif

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* May call back into the driver, so must run unlocked. */
	mmc_request_done(host->mmc, mrq);
}
1290 
1291 static void sdhci_timeout_timer(unsigned long data)
1292 {
1293 	struct sdhci_host *host;
1294 	unsigned long flags;
1295 
1296 	host = (struct sdhci_host*)data;
1297 
1298 	spin_lock_irqsave(&host->lock, flags);
1299 
1300 	if (host->mrq) {
1301 		printk(KERN_ERR "%s: Timeout waiting for hardware "
1302 			"interrupt.\n", mmc_hostname(host->mmc));
1303 		sdhci_dumpregs(host);
1304 
1305 		if (host->data) {
1306 			host->data->error = -ETIMEDOUT;
1307 			sdhci_finish_data(host);
1308 		} else {
1309 			if (host->cmd)
1310 				host->cmd->error = -ETIMEDOUT;
1311 			else
1312 				host->mrq->cmd->error = -ETIMEDOUT;
1313 
1314 			tasklet_schedule(&host->finish_tasklet);
1315 		}
1316 	}
1317 
1318 	mmiowb();
1319 	spin_unlock_irqrestore(&host->lock, flags);
1320 }
1321 
1322 /*****************************************************************************\
1323  *                                                                           *
1324  * Interrupt handling                                                        *
1325  *                                                                           *
1326 \*****************************************************************************/
1327 
/*
 * Handle the command-related interrupt bits (already acknowledged by
 * the caller). Error bits are translated into host->cmd->error and
 * completion is pushed to the finish tasklet; on a clean response
 * interrupt the command is finished directly. Called from
 * sdhci_irq() with host->lock held.
 */
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	/* Spurious: no command in flight to attribute this interrupt to. */
	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error) {
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	/*
	 * The host can send and interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * Unfortunately this is overloaded on the "data complete"
	 * interrupt, so we need to take some care when handling
	 * it.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (host->cmd->flags & MMC_RSP_BUSY) {
		if (host->cmd->data)
			DBG("Cannot wait for busy signal when also "
				"doing a data transfer");
		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
			return;

		/* The controller does not support the end-of-busy IRQ,
		 * fall through and take the SDHCI_INT_RESPONSE */
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
1376 
/*
 * Handle the data-related interrupt bits (already acknowledged by
 * the caller). Records data errors, services PIO transfers, restarts
 * DMA after boundary interrupts, and finishes the data phase —
 * deferring it if the command has not completed yet. Called from
 * sdhci_irq() with host->lock held.
 */
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_END) {
				sdhci_finish_command(host);
				return;
			}
		}

		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		host->data->error = -EIO;

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		/* PIO: move data while the buffer-ready bits are set. */
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS),
				SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
1438 
/*
 * Top-level interrupt handler. Reads the interrupt status register,
 * acknowledges each handled group of bits by writing them back, and
 * dispatches command/data bits to the dedicated helpers. SDIO card
 * interrupts are only flagged here and signalled to the MMC core
 * after the lock has been dropped.
 */
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host* host = dev_id;
	u32 intmask;
	int cardint = 0;

	spin_lock(&host->lock);

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);

	/* Nothing pending, or the register reads all-ones: not ours. */
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n",
		mmc_hostname(host->mmc), intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
			SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
			SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)
		cardint = 1;

	intmask &= ~SDHCI_INT_CARD_INT;

	/* Anything still set at this point was not expected; ack it anyway. */
	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}
1517 
1518 /*****************************************************************************\
1519  *                                                                           *
1520  * Suspend/resume                                                            *
1521  *                                                                           *
1522 \*****************************************************************************/
1523 
1524 #ifdef CONFIG_PM
1525 
1526 int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1527 {
1528 	int ret;
1529 
1530 	sdhci_disable_card_detection(host);
1531 
1532 	ret = mmc_suspend_host(host->mmc, state);
1533 	if (ret)
1534 		return ret;
1535 
1536 	free_irq(host->irq, host);
1537 
1538 	return 0;
1539 }
1540 
1541 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
1542 
1543 int sdhci_resume_host(struct sdhci_host *host)
1544 {
1545 	int ret;
1546 
1547 	if (host->flags & SDHCI_USE_DMA) {
1548 		if (host->ops->enable_dma)
1549 			host->ops->enable_dma(host);
1550 	}
1551 
1552 	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
1553 			  mmc_hostname(host->mmc), host);
1554 	if (ret)
1555 		return ret;
1556 
1557 	sdhci_init(host);
1558 	mmiowb();
1559 
1560 	ret = mmc_resume_host(host->mmc);
1561 	if (ret)
1562 		return ret;
1563 
1564 	sdhci_enable_card_detection(host);
1565 
1566 	return 0;
1567 }
1568 
1569 EXPORT_SYMBOL_GPL(sdhci_resume_host);
1570 
1571 #endif /* CONFIG_PM */
1572 
1573 /*****************************************************************************\
1574  *                                                                           *
1575  * Device allocation/registration                                            *
1576  *                                                                           *
1577 \*****************************************************************************/
1578 
1579 struct sdhci_host *sdhci_alloc_host(struct device *dev,
1580 	size_t priv_size)
1581 {
1582 	struct mmc_host *mmc;
1583 	struct sdhci_host *host;
1584 
1585 	WARN_ON(dev == NULL);
1586 
1587 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
1588 	if (!mmc)
1589 		return ERR_PTR(-ENOMEM);
1590 
1591 	host = mmc_priv(mmc);
1592 	host->mmc = mmc;
1593 
1594 	return host;
1595 }
1596 
1597 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1598 
/*
 * Probe-time registration. Reads the capabilities register, picks a
 * transfer mode (ADMA, plain DMA, or PIO), derives the clock limits,
 * fills in the mmc_host parameters, sets up the IRQ/tasklets/timer
 * and finally registers with the MMC core.
 * Returns 0 on success or a negative errno.
 */
int sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	unsigned int caps;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/* The debug_quirks module parameter overrides caller-set quirks. */
	if (debug_quirks)
		host->quirks = debug_quirks;

	sdhci_reset(host, SDHCI_RESET_ALL);

	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (host->version & SDHCI_SPEC_VER_MASK)
				>> SDHCI_SPEC_VER_SHIFT;
	if (host->version > SDHCI_SPEC_200) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", mmc_hostname(mmc),
			host->version);
	}

	caps = sdhci_readl(host, SDHCI_CAPABILITIES);

	/* Choose DMA vs PIO; quirks take precedence over the caps bits. */
	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_DMA;
	}

	/* ADMA needs a 2.00-spec controller that advertises ADMA2. */
	if (host->flags & SDHCI_USE_DMA) {
		if ((host->version >= SDHCI_SPEC_200) &&
				(caps & SDHCI_CAN_DO_ADMA2))
			host->flags |= SDHCI_USE_ADMA;
	}

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
		(host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/* Let the platform glue set up DMA; failure drops us back to PIO. */
	if (host->flags & SDHCI_USE_DMA) {
		if (host->ops->enable_dma) {
			if (host->ops->enable_dma(host)) {
				printk(KERN_WARNING "%s: No suitable DMA "
					"available. Falling back to PIO.\n",
					mmc_hostname(mmc));
				host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA);
			}
		}
	}

	if (host->flags & SDHCI_USE_ADMA) {
		/*
		 * We need to allocate descriptors for all sg entries
		 * (128) and potentially one alignment transfer for
		 * each of those entries.
		 */
		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
		if (!host->adma_desc || !host->align_buffer) {
			kfree(host->adma_desc);
			kfree(host->align_buffer);
			printk(KERN_WARNING "%s: Unable to allocate ADMA "
				"buffers. Falling back to standard DMA.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & SDHCI_USE_DMA)) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
	}

	/* Base clock from the caps register; otherwise ask the platform. */
	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	host->max_clk *= 1000000;
	if (host->max_clk == 0) {
		if (!host->ops->get_max_clock) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify base clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/* Timeout clock: same fallback scheme as the base clock. */
	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		if (!host->ops->get_timeout_clock) {
			printk(KERN_ERR
			       "%s: Hardware doesn't specify timeout clock "
			       "frequency.\n", mmc_hostname(mmc));
			return -ENODEV;
		}
		host->timeout_clk = host->ops->get_timeout_clock(host);
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* Translate voltage capability bits into MMC OCR bits. */
	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"support voltages.\n", mmc_hostname(mmc));
		return -ENODEV;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_hw_segs = 128;
	else if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else /* PIO */
		mmc->max_hw_segs = 128;
	mmc->max_phys_segs = 128;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_seg_size = 65536;
	else
		mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			printk(KERN_WARNING "%s: Invalid maximum block size, "
				"assuming 512 bytes\n", mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	/* Convert the register encoding (0..2, checked above) to bytes. */
	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		mmc_hostname(mmc), host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret)
		goto reset;
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA)?"A":"",
		(host->flags & SDHCI_USE_DMA)?"DMA":"PIO");

	sdhci_enable_card_detection(host);

	return 0;

	/* Error unwinding, in reverse order of setup. */
#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);
1859 
/*
 * Unregister a host. When @dead is non-zero the hardware is assumed
 * to be inaccessible: the host is flagged dead, any in-flight request
 * is failed with -ENOMEDIUM, and the final controller reset is
 * skipped. Frees the ADMA buffers allocated by sdhci_add_host().
 */
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			printk(KERN_ERR "%s: Controller removed during "
				" transfer!\n", mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	/* Only touch the hardware if it is still reachable. */
	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	kfree(host->adma_desc);
	kfree(host->align_buffer);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
1906 
/* Release the host structure obtained from sdhci_alloc_host(). */
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);
1913 
1914 /*****************************************************************************\
1915  *                                                                           *
1916  * Driver init/exit                                                          *
1917  *                                                                           *
1918 \*****************************************************************************/
1919 
/* Module init: print the banner; hosts register via sdhci_add_host(). */
static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}
1928 
/* Module exit: nothing to tear down at the core-driver level. */
static void __exit sdhci_drv_exit(void)
{
}
1932 
module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

/* Read-only module parameter to force quirk flags for debugging. */
module_param(debug_quirks, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
1943