xref: /openbmc/linux/drivers/mmc/host/sdhci.c (revision b35565bb)
1 /*
2  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or (at
9  * your option) any later version.
10  *
11  * Thanks to the following companies for their support:
12  *
13  *     - JMicron (hardware and technical support)
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/ktime.h>
18 #include <linux/highmem.h>
19 #include <linux/io.h>
20 #include <linux/module.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/slab.h>
23 #include <linux/scatterlist.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/of.h>
27 
28 #include <linux/leds.h>
29 
30 #include <linux/mmc/mmc.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/card.h>
33 #include <linux/mmc/sdio.h>
34 #include <linux/mmc/slot-gpio.h>
35 
36 #include "sdhci.h"
37 
38 #define DRIVER_NAME "sdhci"
39 
40 #define DBG(f, x...) \
41 	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
42 
43 #define SDHCI_DUMP(f, x...) \
44 	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
45 
46 #define MAX_TUNING_LOOP 40
47 
48 static unsigned int debug_quirks;
49 static unsigned int debug_quirks2;
50 
51 static void sdhci_finish_data(struct sdhci_host *);
52 
53 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
54 
55 void sdhci_dumpregs(struct sdhci_host *host)
56 {
57 	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
58 
59 	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
60 		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
61 		   sdhci_readw(host, SDHCI_HOST_VERSION));
62 	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
63 		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
64 		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
65 	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
66 		   sdhci_readl(host, SDHCI_ARGUMENT),
67 		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
68 	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
69 		   sdhci_readl(host, SDHCI_PRESENT_STATE),
70 		   sdhci_readb(host, SDHCI_HOST_CONTROL));
71 	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
72 		   sdhci_readb(host, SDHCI_POWER_CONTROL),
73 		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
74 	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
75 		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
76 		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
77 	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
78 		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
79 		   sdhci_readl(host, SDHCI_INT_STATUS));
80 	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
81 		   sdhci_readl(host, SDHCI_INT_ENABLE),
82 		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
83 	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
84 		   sdhci_readw(host, SDHCI_ACMD12_ERR),
85 		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
86 	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
87 		   sdhci_readl(host, SDHCI_CAPABILITIES),
88 		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
89 	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
90 		   sdhci_readw(host, SDHCI_COMMAND),
91 		   sdhci_readl(host, SDHCI_MAX_CURRENT));
92 	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
93 		   sdhci_readl(host, SDHCI_RESPONSE),
94 		   sdhci_readl(host, SDHCI_RESPONSE + 4));
95 	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
96 		   sdhci_readl(host, SDHCI_RESPONSE + 8),
97 		   sdhci_readl(host, SDHCI_RESPONSE + 12));
98 	SDHCI_DUMP("Host ctl2: 0x%08x\n",
99 		   sdhci_readw(host, SDHCI_HOST_CONTROL2));
100 
101 	if (host->flags & SDHCI_USE_ADMA) {
102 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
103 			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
104 				   sdhci_readl(host, SDHCI_ADMA_ERROR),
105 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
106 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
107 		} else {
108 			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
109 				   sdhci_readl(host, SDHCI_ADMA_ERROR),
110 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
111 		}
112 	}
113 
114 	SDHCI_DUMP("============================================\n");
115 }
116 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
117 
118 /*****************************************************************************\
119  *                                                                           *
120  * Low level functions                                                       *
121  *                                                                           *
122 \*****************************************************************************/
123 
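/*
 * A command occupies the data lines when it carries a data transfer or
 * uses busy signalling on DAT0 (R1b/R5b), e.g. MMC_STOP_TRANSMISSION.
 */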
124 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
125 {
126 	return cmd->data || cmd->flags & MMC_RSP_BUSY;
127 }
128 
129 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
130 {
131 	u32 present;
132 
133 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
134 	    !mmc_card_is_removable(host->mmc))
135 		return;
136 
137 	if (enable) {
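		/*
		 * Arm the interrupt for the opposite of the current state,
		 * so the next insertion or removal event is always caught.
		 */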
138 		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
139 				      SDHCI_CARD_PRESENT;
140 
141 		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
142 				       SDHCI_INT_CARD_INSERT;
143 	} else {
144 		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
145 	}
146 
147 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
148 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
149 }
150 
151 static void sdhci_enable_card_detection(struct sdhci_host *host)
152 {
153 	sdhci_set_card_detection(host, true);
154 }
155 
156 static void sdhci_disable_card_detection(struct sdhci_host *host)
157 {
158 	sdhci_set_card_detection(host, false);
159 }
160 
161 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
162 {
163 	if (host->bus_on)
164 		return;
165 	host->bus_on = true;
166 	pm_runtime_get_noresume(host->mmc->parent);
167 }
168 
169 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
170 {
171 	if (!host->bus_on)
172 		return;
173 	host->bus_on = false;
174 	pm_runtime_put_noidle(host->mmc->parent);
175 }
176 
177 void sdhci_reset(struct sdhci_host *host, u8 mask)
178 {
179 	ktime_t timeout;
180 
181 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
182 
183 	if (mask & SDHCI_RESET_ALL) {
184 		host->clock = 0;
185 		/* Reset-all turns off SD Bus Power */
186 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
187 			sdhci_runtime_pm_bus_off(host);
188 	}
189 
190 	/* Wait max 100 ms */
191 	timeout = ktime_add_ms(ktime_get(), 100);
192 
193 	/* hw clears the bit when it's done */
194 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
195 		if (ktime_after(ktime_get(), timeout)) {
196 			pr_err("%s: Reset 0x%x never completed.\n",
197 				mmc_hostname(host->mmc), (int)mask);
198 			sdhci_dumpregs(host);
199 			return;
200 		}
201 		udelay(10);
202 	}
203 }
204 EXPORT_SYMBOL_GPL(sdhci_reset);
205 
206 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
207 {
208 	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
209 		struct mmc_host *mmc = host->mmc;
210 
211 		if (!mmc->ops->get_cd(mmc))
212 			return;
213 	}
214 
215 	host->ops->reset(host, mask);
216 
217 	if (mask & SDHCI_RESET_ALL) {
218 		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
219 			if (host->ops->enable_dma)
220 				host->ops->enable_dma(host);
221 		}
222 
223 		/* Resetting the controller clears many settings */
224 		host->preset_enabled = false;
225 	}
226 }
227 
228 static void sdhci_set_default_irqs(struct sdhci_host *host)
229 {
230 	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
231 		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
232 		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
233 		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
234 		    SDHCI_INT_RESPONSE;
235 
236 	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
237 	    host->tuning_mode == SDHCI_TUNING_MODE_3)
238 		host->ier |= SDHCI_INT_RETUNE;
239 
240 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
241 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
242 }
243 
244 static void sdhci_init(struct sdhci_host *host, int soft)
245 {
246 	struct mmc_host *mmc = host->mmc;
247 
248 	if (soft)
249 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
250 	else
251 		sdhci_do_reset(host, SDHCI_RESET_ALL);
252 
253 	sdhci_set_default_irqs(host);
254 
255 	host->cqe_on = false;
256 
257 	if (soft) {
258 		/* force clock reconfiguration */
259 		host->clock = 0;
260 		mmc->ops->set_ios(mmc, &mmc->ios);
261 	}
262 }
263 
264 static void sdhci_reinit(struct sdhci_host *host)
265 {
266 	sdhci_init(host, 0);
267 	sdhci_enable_card_detection(host);
268 }
269 
270 static void __sdhci_led_activate(struct sdhci_host *host)
271 {
272 	u8 ctrl;
273 
274 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
275 	ctrl |= SDHCI_CTRL_LED;
276 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
277 }
278 
279 static void __sdhci_led_deactivate(struct sdhci_host *host)
280 {
281 	u8 ctrl;
282 
283 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
284 	ctrl &= ~SDHCI_CTRL_LED;
285 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
286 }
287 
288 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
289 static void sdhci_led_control(struct led_classdev *led,
290 			      enum led_brightness brightness)
291 {
292 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
293 	unsigned long flags;
294 
295 	spin_lock_irqsave(&host->lock, flags);
296 
297 	if (host->runtime_suspended)
298 		goto out;
299 
300 	if (brightness == LED_OFF)
301 		__sdhci_led_deactivate(host);
302 	else
303 		__sdhci_led_activate(host);
304 out:
305 	spin_unlock_irqrestore(&host->lock, flags);
306 }
307 
308 static int sdhci_led_register(struct sdhci_host *host)
309 {
310 	struct mmc_host *mmc = host->mmc;
311 
312 	snprintf(host->led_name, sizeof(host->led_name),
313 		 "%s::", mmc_hostname(mmc));
314 
315 	host->led.name = host->led_name;
316 	host->led.brightness = LED_OFF;
317 	host->led.default_trigger = mmc_hostname(mmc);
318 	host->led.brightness_set = sdhci_led_control;
319 
320 	return led_classdev_register(mmc_dev(mmc), &host->led);
321 }
322 
323 static void sdhci_led_unregister(struct sdhci_host *host)
324 {
325 	led_classdev_unregister(&host->led);
326 }
327 
328 static inline void sdhci_led_activate(struct sdhci_host *host)
329 {
330 }
331 
332 static inline void sdhci_led_deactivate(struct sdhci_host *host)
333 {
334 }
335 
336 #else
337 
338 static inline int sdhci_led_register(struct sdhci_host *host)
339 {
340 	return 0;
341 }
342 
343 static inline void sdhci_led_unregister(struct sdhci_host *host)
344 {
345 }
346 
347 static inline void sdhci_led_activate(struct sdhci_host *host)
348 {
349 	__sdhci_led_activate(host);
350 }
351 
352 static inline void sdhci_led_deactivate(struct sdhci_host *host)
353 {
354 	__sdhci_led_deactivate(host);
355 }
356 
357 #endif
358 
359 /*****************************************************************************\
360  *                                                                           *
361  * Core functions                                                            *
362  *                                                                           *
363 \*****************************************************************************/
364 
365 static void sdhci_read_block_pio(struct sdhci_host *host)
366 {
367 	unsigned long flags;
368 	size_t blksize, len, chunk;
369 	u32 uninitialized_var(scratch);
370 	u8 *buf;
371 
372 	DBG("PIO reading\n");
373 
374 	blksize = host->data->blksz;
375 	chunk = 0;
376 
377 	local_irq_save(flags);
378 
379 	while (blksize) {
380 		BUG_ON(!sg_miter_next(&host->sg_miter));
381 
382 		len = min(host->sg_miter.length, blksize);
383 
384 		blksize -= len;
385 		host->sg_miter.consumed = len;
386 
387 		buf = host->sg_miter.addr;
388 
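		/*
		 * The data port is 32 bits wide; bytes are handed out
		 * LSB-first, e.g. a word read of 0xDDCCBBAA yields the
		 * byte sequence AA, BB, CC, DD.
		 */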
389 		while (len) {
390 			if (chunk == 0) {
391 				scratch = sdhci_readl(host, SDHCI_BUFFER);
392 				chunk = 4;
393 			}
394 
395 			*buf = scratch & 0xFF;
396 
397 			buf++;
398 			scratch >>= 8;
399 			chunk--;
400 			len--;
401 		}
402 	}
403 
404 	sg_miter_stop(&host->sg_miter);
405 
406 	local_irq_restore(flags);
407 }
408 
409 static void sdhci_write_block_pio(struct sdhci_host *host)
410 {
411 	unsigned long flags;
412 	size_t blksize, len, chunk;
413 	u32 scratch;
414 	u8 *buf;
415 
416 	DBG("PIO writing\n");
417 
418 	blksize = host->data->blksz;
419 	chunk = 0;
420 	scratch = 0;
421 
422 	local_irq_save(flags);
423 
424 	while (blksize) {
425 		BUG_ON(!sg_miter_next(&host->sg_miter));
426 
427 		len = min(host->sg_miter.length, blksize);
428 
429 		blksize -= len;
430 		host->sg_miter.consumed = len;
431 
432 		buf = host->sg_miter.addr;
433 
434 		while (len) {
435 			scratch |= (u32)*buf << (chunk * 8);
436 
437 			buf++;
438 			chunk++;
439 			len--;
440 
441 			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
442 				sdhci_writel(host, scratch, SDHCI_BUFFER);
443 				chunk = 0;
444 				scratch = 0;
445 			}
446 		}
447 	}
448 
449 	sg_miter_stop(&host->sg_miter);
450 
451 	local_irq_restore(flags);
452 }
453 
454 static void sdhci_transfer_pio(struct sdhci_host *host)
455 {
456 	u32 mask;
457 
458 	if (host->blocks == 0)
459 		return;
460 
461 	if (host->data->flags & MMC_DATA_READ)
462 		mask = SDHCI_DATA_AVAILABLE;
463 	else
464 		mask = SDHCI_SPACE_AVAILABLE;
465 
466 	/*
467 	 * Some controllers (JMicron JMB38x) mess up the buffer bits
468 	 * for transfers < 4 bytes. As long as it is just one block,
469 	 * we can ignore the bits.
470 	 */
471 	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
472 		(host->data->blocks == 1))
473 		mask = ~0;
474 
475 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
476 		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
477 			udelay(100);
478 
479 		if (host->data->flags & MMC_DATA_READ)
480 			sdhci_read_block_pio(host);
481 		else
482 			sdhci_write_block_pio(host);
483 
484 		host->blocks--;
485 		if (host->blocks == 0)
486 			break;
487 	}
488 
489 	DBG("PIO transfer complete.\n");
490 }
491 
492 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
493 				  struct mmc_data *data, int cookie)
494 {
495 	int sg_count;
496 
497 	/*
498 	 * If the data buffers are already mapped, return the previous
499 	 * dma_map_sg() result.
500 	 */
501 	if (data->host_cookie == COOKIE_PRE_MAPPED)
502 		return data->sg_count;
503 
504 	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
505 			      mmc_get_dma_dir(data));
506 
507 	if (sg_count == 0)
508 		return -ENOSPC;
509 
510 	data->sg_count = sg_count;
511 	data->host_cookie = cookie;
512 
513 	return sg_count;
514 }
515 
516 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
517 {
518 	local_irq_save(*flags);
519 	return kmap_atomic(sg_page(sg)) + sg->offset;
520 }
521 
522 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
523 {
524 	kunmap_atomic(buffer);
525 	local_irq_restore(*flags);
526 }
527 
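/*
 * Illustrative example: a 512-byte segment at DMA address 0x80001000 is
 * written as a single "tran, valid" descriptor with cmd = ADMA2_TRAN_VALID,
 * len = 512 and addr_lo = 0x80001000; addr_hi is filled in only when
 * SDHCI_USE_64_BIT_DMA is set.
 */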
528 static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
529 				  dma_addr_t addr, int len, unsigned cmd)
530 {
531 	struct sdhci_adma2_64_desc *dma_desc = desc;
532 
533 	/* 32-bit and 64-bit descriptors have these members in same position */
534 	dma_desc->cmd = cpu_to_le16(cmd);
535 	dma_desc->len = cpu_to_le16(len);
536 	dma_desc->addr_lo = cpu_to_le32((u32)addr);
537 
538 	if (host->flags & SDHCI_USE_64_BIT_DMA)
539 		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
540 }
541 
542 static void sdhci_adma_mark_end(void *desc)
543 {
544 	struct sdhci_adma2_64_desc *dma_desc = desc;
545 
546 	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
547 	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
548 }
549 
550 static void sdhci_adma_table_pre(struct sdhci_host *host,
551 	struct mmc_data *data, int sg_count)
552 {
553 	struct scatterlist *sg;
554 	unsigned long flags;
555 	dma_addr_t addr, align_addr;
556 	void *desc, *align;
557 	char *buffer;
558 	int len, offset, i;
559 
560 	/*
561 	 * The spec does not specify the endianness of the descriptor table.
562 	 * We currently guess that it is LE.
563 	 */
564 
565 	host->sg_count = sg_count;
566 
567 	desc = host->adma_table;
568 	align = host->align_buffer;
569 
570 	align_addr = host->align_addr;
571 
572 	for_each_sg(data->sg, sg, host->sg_count, i) {
573 		addr = sg_dma_address(sg);
574 		len = sg_dma_len(sg);
575 
576 		/*
577 		 * The SDHCI specification states that ADMA addresses must
578 		 * be 32-bit aligned. If they aren't, then we use a bounce
579 		 * buffer for the (up to three) bytes that screw up the
580 		 * alignment.
581 		 */
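		/*
		 * Example: addr = 0x80001002 gives offset = 2, so the two
		 * bytes up to the next 4-byte boundary go through the
		 * bounce buffer and the chain resumes at 0x80001004.
		 */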
582 		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
583 			 SDHCI_ADMA2_MASK;
584 		if (offset) {
585 			if (data->flags & MMC_DATA_WRITE) {
586 				buffer = sdhci_kmap_atomic(sg, &flags);
587 				memcpy(align, buffer, offset);
588 				sdhci_kunmap_atomic(buffer, &flags);
589 			}
590 
591 			/* tran, valid */
592 			sdhci_adma_write_desc(host, desc, align_addr, offset,
593 					      ADMA2_TRAN_VALID);
594 
595 			BUG_ON(offset > 65536);
596 
597 			align += SDHCI_ADMA2_ALIGN;
598 			align_addr += SDHCI_ADMA2_ALIGN;
599 
600 			desc += host->desc_sz;
601 
602 			addr += offset;
603 			len -= offset;
604 		}
605 
606 		BUG_ON(len > 65536);
607 
608 		if (len) {
609 			/* tran, valid */
610 			sdhci_adma_write_desc(host, desc, addr, len,
611 					      ADMA2_TRAN_VALID);
612 			desc += host->desc_sz;
613 		}
614 
615 		/*
616 		 * If this triggers then we have a calculation bug
617 		 * somewhere. :/
618 		 */
619 		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
620 	}
621 
622 	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
623 		/* Mark the last descriptor as the terminating descriptor */
624 		if (desc != host->adma_table) {
625 			desc -= host->desc_sz;
626 			sdhci_adma_mark_end(desc);
627 		}
628 	} else {
629 		/* Add a terminating entry - nop, end, valid */
630 		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
631 	}
632 }
633 
634 static void sdhci_adma_table_post(struct sdhci_host *host,
635 	struct mmc_data *data)
636 {
637 	struct scatterlist *sg;
638 	int i, size;
639 	void *align;
640 	char *buffer;
641 	unsigned long flags;
642 
643 	if (data->flags & MMC_DATA_READ) {
644 		bool has_unaligned = false;
645 
646 		/* Do a quick scan of the SG list for any unaligned mappings */
647 		for_each_sg(data->sg, sg, host->sg_count, i)
648 			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
649 				has_unaligned = true;
650 				break;
651 			}
652 
653 		if (has_unaligned) {
654 			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
655 					    data->sg_len, DMA_FROM_DEVICE);
656 
657 			align = host->align_buffer;
658 
659 			for_each_sg(data->sg, sg, host->sg_count, i) {
660 				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
661 					size = SDHCI_ADMA2_ALIGN -
662 					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
663 
664 					buffer = sdhci_kmap_atomic(sg, &flags);
665 					memcpy(buffer, align, size);
666 					sdhci_kunmap_atomic(buffer, &flags);
667 
668 					align += SDHCI_ADMA2_ALIGN;
669 				}
670 			}
671 		}
672 	}
673 }
674 
675 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
676 {
677 	u8 count;
678 	struct mmc_data *data = cmd->data;
679 	unsigned target_timeout, current_timeout;
680 
681 	/*
682 	 * If the host controller provides us with an incorrect timeout
683 	 * value, just skip the check and use 0xE.  The hardware may take
684 	 * longer to time out, but that's much better than having a too-short
685 	 * timeout value.
686 	 */
687 	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
688 		return 0xE;
689 
690 	/* Unspecified timeout, assume max */
691 	if (!data && !cmd->busy_timeout)
692 		return 0xE;
693 
694 	/* timeout in us */
695 	if (!data)
696 		target_timeout = cmd->busy_timeout * 1000;
697 	else {
698 		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
699 		if (host->clock && data->timeout_clks) {
700 			unsigned long long val;
701 
702 			/*
703 			 * data->timeout_clks is in units of clock cycles.
704 			 * host->clock is in Hz.  target_timeout is in us.
705 			 * Hence, us = 1000000 * cycles / Hz.  Round up.
706 			 */
707 			val = 1000000ULL * data->timeout_clks;
708 			if (do_div(val, host->clock))
709 				target_timeout++;
710 			target_timeout += val;
711 		}
712 	}
713 
714 	/*
715 	 * Figure out needed cycles.
716 	 * We do this in steps in order to fit inside a 32 bit int.
717 	 * The first step is the minimum timeout, which will have a
718 	 * minimum resolution of 6 bits:
719 	 * (1) 2^13*1000 > 2^22,
720 	 * (2) host->timeout_clk < 2^16
721 	 *     =>
722 	 *     (1) / (2) > 2^6
723 	 */
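	/*
	 * Worked example (illustrative values): with host->timeout_clk =
	 * 1000 kHz, the minimum timeout below is (1 << 13) * 1000 / 1000 =
	 * 8192 us; a 100000 us target then needs four doublings, so
	 * count = 4 ends up in the timeout register.
	 */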
724 	count = 0;
725 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
726 	while (current_timeout < target_timeout) {
727 		count++;
728 		current_timeout <<= 1;
729 		if (count >= 0xF)
730 			break;
731 	}
732 
733 	if (count >= 0xF) {
734 		DBG("Too large timeout 0x%x requested for CMD%d!\n",
735 		    count, cmd->opcode);
736 		count = 0xE;
737 	}
738 
739 	return count;
740 }
741 
742 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
743 {
744 	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
745 	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
746 
747 	if (host->flags & SDHCI_REQ_USE_DMA)
748 		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
749 	else
750 		host->ier = (host->ier & ~dma_irqs) | pio_irqs;
751 
752 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
753 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
754 }
755 
756 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
757 {
758 	u8 count;
759 
760 	if (host->ops->set_timeout) {
761 		host->ops->set_timeout(host, cmd);
762 	} else {
763 		count = sdhci_calc_timeout(host, cmd);
764 		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
765 	}
766 }
767 
768 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
769 {
770 	u8 ctrl;
771 	struct mmc_data *data = cmd->data;
772 
773 	if (sdhci_data_line_cmd(cmd))
774 		sdhci_set_timeout(host, cmd);
775 
776 	if (!data)
777 		return;
778 
779 	WARN_ON(host->data);
780 
781 	/* Sanity checks */
782 	BUG_ON(data->blksz * data->blocks > 524288);
783 	BUG_ON(data->blksz > host->mmc->max_blk_size);
784 	BUG_ON(data->blocks > 65535);
785 
786 	host->data = data;
787 	host->data_early = 0;
788 	host->data->bytes_xfered = 0;
789 
790 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
791 		struct scatterlist *sg;
792 		unsigned int length_mask, offset_mask;
793 		int i;
794 
795 		host->flags |= SDHCI_REQ_USE_DMA;
796 
797 		/*
798 		 * FIXME: This doesn't account for merging when mapping the
799 		 * scatterlist.
800 		 *
801 		 * The assumption here is that alignment and lengths are
802 		 * the same after DMA mapping to device address space.
803 		 */
804 		length_mask = 0;
805 		offset_mask = 0;
806 		if (host->flags & SDHCI_USE_ADMA) {
807 			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
808 				length_mask = 3;
809 				/*
810 				 * As we use up to 3 byte chunks to work
811 				 * around alignment problems, we need to
812 				 * check the offset as well.
813 				 */
814 				offset_mask = 3;
815 			}
816 		} else {
817 			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
818 				length_mask = 3;
819 			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
820 				offset_mask = 3;
821 		}
822 
823 		if (unlikely(length_mask | offset_mask)) {
824 			for_each_sg(data->sg, sg, data->sg_len, i) {
825 				if (sg->length & length_mask) {
826 					DBG("Reverting to PIO because of transfer size (%d)\n",
827 					    sg->length);
828 					host->flags &= ~SDHCI_REQ_USE_DMA;
829 					break;
830 				}
831 				if (sg->offset & offset_mask) {
832 					DBG("Reverting to PIO because of bad alignment\n");
833 					host->flags &= ~SDHCI_REQ_USE_DMA;
834 					break;
835 				}
836 			}
837 		}
838 	}
839 
840 	if (host->flags & SDHCI_REQ_USE_DMA) {
841 		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
842 
843 		if (sg_cnt <= 0) {
844 			/*
845 			 * This only happens when someone fed
846 			 * us an invalid request.
847 			 */
848 			WARN_ON(1);
849 			host->flags &= ~SDHCI_REQ_USE_DMA;
850 		} else if (host->flags & SDHCI_USE_ADMA) {
851 			sdhci_adma_table_pre(host, data, sg_cnt);
852 
853 			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
854 			if (host->flags & SDHCI_USE_64_BIT_DMA)
855 				sdhci_writel(host,
856 					     (u64)host->adma_addr >> 32,
857 					     SDHCI_ADMA_ADDRESS_HI);
858 		} else {
859 			WARN_ON(sg_cnt != 1);
860 			sdhci_writel(host, sg_dma_address(data->sg),
861 				SDHCI_DMA_ADDRESS);
862 		}
863 	}
864 
865 	/*
866 	 * Always adjust the DMA selection as some controllers
867 	 * (e.g. JMicron) can't do PIO properly when the selection
868 	 * is ADMA.
869 	 */
870 	if (host->version >= SDHCI_SPEC_200) {
871 		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
872 		ctrl &= ~SDHCI_CTRL_DMA_MASK;
873 		if ((host->flags & SDHCI_REQ_USE_DMA) &&
874 			(host->flags & SDHCI_USE_ADMA)) {
875 			if (host->flags & SDHCI_USE_64_BIT_DMA)
876 				ctrl |= SDHCI_CTRL_ADMA64;
877 			else
878 				ctrl |= SDHCI_CTRL_ADMA32;
879 		} else {
880 			ctrl |= SDHCI_CTRL_SDMA;
881 		}
882 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
883 	}
884 
885 	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
886 		int flags;
887 
888 		flags = SG_MITER_ATOMIC;
889 		if (host->data->flags & MMC_DATA_READ)
890 			flags |= SG_MITER_TO_SG;
891 		else
892 			flags |= SG_MITER_FROM_SG;
893 		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
894 		host->blocks = data->blocks;
895 	}
896 
897 	sdhci_set_transfer_irqs(host);
898 
899 	/* Set the DMA boundary value and block size */
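	/*
	 * host->sdma_boundary holds the encoded SDMA buffer boundary;
	 * e.g. the default SDHCI_DEFAULT_BOUNDARY_ARG (7) selects a
	 * 512 KiB boundary.
	 */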
900 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
901 		     SDHCI_BLOCK_SIZE);
902 	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
903 }
904 
905 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
906 				    struct mmc_request *mrq)
907 {
908 	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
909 	       !mrq->cap_cmd_during_tfr;
910 }
911 
912 static void sdhci_set_transfer_mode(struct sdhci_host *host,
913 	struct mmc_command *cmd)
914 {
915 	u16 mode = 0;
916 	struct mmc_data *data = cmd->data;
917 
918 	if (data == NULL) {
919 		if (host->quirks2 &
920 			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
921 			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
922 		} else {
923 			/* clear Auto CMD settings for no data CMDs */
924 			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
925 			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
926 				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
927 		}
928 		return;
929 	}
930 
931 	WARN_ON(!host->data);
932 
933 	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
934 		mode = SDHCI_TRNS_BLK_CNT_EN;
935 
936 	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
937 		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
938 		/*
939 		 * If we are sending CMD23, CMD12 never gets sent
940 		 * on successful completion (so no Auto-CMD12).
941 		 */
942 		if (sdhci_auto_cmd12(host, cmd->mrq) &&
943 		    (cmd->opcode != SD_IO_RW_EXTENDED))
944 			mode |= SDHCI_TRNS_AUTO_CMD12;
945 		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
946 			mode |= SDHCI_TRNS_AUTO_CMD23;
947 			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
948 		}
949 	}
950 
951 	if (data->flags & MMC_DATA_READ)
952 		mode |= SDHCI_TRNS_READ;
953 	if (host->flags & SDHCI_REQ_USE_DMA)
954 		mode |= SDHCI_TRNS_DMA;
955 
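	/*
	 * Example: a DMA multi-block read sent with CMD23 and
	 * SDHCI_AUTO_CMD23 enabled is programmed as SDHCI_TRNS_BLK_CNT_EN |
	 * SDHCI_TRNS_MULTI | SDHCI_TRNS_AUTO_CMD23 | SDHCI_TRNS_READ |
	 * SDHCI_TRNS_DMA.
	 */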
956 	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
957 }
958 
959 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
960 {
961 	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
962 		((mrq->cmd && mrq->cmd->error) ||
963 		 (mrq->sbc && mrq->sbc->error) ||
964 		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
965 				(mrq->data->stop && mrq->data->stop->error))) ||
966 		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
967 }
968 
969 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
970 {
971 	int i;
972 
973 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
974 		if (host->mrqs_done[i] == mrq) {
975 			WARN_ON(1);
976 			return;
977 		}
978 	}
979 
980 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
981 		if (!host->mrqs_done[i]) {
982 			host->mrqs_done[i] = mrq;
983 			break;
984 		}
985 	}
986 
987 	WARN_ON(i >= SDHCI_MAX_MRQS);
988 
989 	tasklet_schedule(&host->finish_tasklet);
990 }
991 
992 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
993 {
994 	if (host->cmd && host->cmd->mrq == mrq)
995 		host->cmd = NULL;
996 
997 	if (host->data_cmd && host->data_cmd->mrq == mrq)
998 		host->data_cmd = NULL;
999 
1000 	if (host->data && host->data->mrq == mrq)
1001 		host->data = NULL;
1002 
1003 	if (sdhci_needs_reset(host, mrq))
1004 		host->pending_reset = true;
1005 
1006 	__sdhci_finish_mrq(host, mrq);
1007 }
1008 
1009 static void sdhci_finish_data(struct sdhci_host *host)
1010 {
1011 	struct mmc_command *data_cmd = host->data_cmd;
1012 	struct mmc_data *data = host->data;
1013 
1014 	host->data = NULL;
1015 	host->data_cmd = NULL;
1016 
1017 	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1018 	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1019 		sdhci_adma_table_post(host, data);
1020 
1021 	/*
1022 	 * The specification states that the block count register must
1023 	 * be updated, but it does not specify at what point in the
1024 	 * data flow. That makes the register entirely useless to read
1025 	 * back so we have to assume that nothing made it to the card
1026 	 * in the event of an error.
1027 	 */
1028 	if (data->error)
1029 		data->bytes_xfered = 0;
1030 	else
1031 		data->bytes_xfered = data->blksz * data->blocks;
1032 
1033 	/*
1034 	 * Need to send CMD12 if -
1035 	 * a) open-ended multiblock transfer (no CMD23)
1036 	 * b) error in multiblock transfer
1037 	 */
1038 	if (data->stop &&
1039 	    (data->error ||
1040 	     !data->mrq->sbc)) {
1041 
1042 		/*
1043 		 * The controller needs a reset of internal state machines
1044 		 * upon error conditions.
1045 		 */
1046 		if (data->error) {
1047 			if (!host->cmd || host->cmd == data_cmd)
1048 				sdhci_do_reset(host, SDHCI_RESET_CMD);
1049 			sdhci_do_reset(host, SDHCI_RESET_DATA);
1050 		}
1051 
1052 		/*
1053 		 * 'cap_cmd_during_tfr' request must not use the command line
1054 		 * after mmc_command_done() has been called. It is upper layer's
1055 		 * responsibility to send the stop command if required.
1056 		 */
1057 		if (data->mrq->cap_cmd_during_tfr) {
1058 			sdhci_finish_mrq(host, data->mrq);
1059 		} else {
1060 			/* Avoid triggering warning in sdhci_send_command() */
1061 			host->cmd = NULL;
1062 			sdhci_send_command(host, data->stop);
1063 		}
1064 	} else {
1065 		sdhci_finish_mrq(host, data->mrq);
1066 	}
1067 }
1068 
1069 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
1070 			    unsigned long timeout)
1071 {
1072 	if (sdhci_data_line_cmd(mrq->cmd))
1073 		mod_timer(&host->data_timer, timeout);
1074 	else
1075 		mod_timer(&host->timer, timeout);
1076 }
1077 
1078 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
1079 {
1080 	if (sdhci_data_line_cmd(mrq->cmd))
1081 		del_timer(&host->data_timer);
1082 	else
1083 		del_timer(&host->timer);
1084 }
1085 
1086 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1087 {
1088 	int flags;
1089 	u32 mask;
1090 	unsigned long timeout;
1091 
1092 	WARN_ON(host->cmd);
1093 
1094 	/* Initially, a command has no error */
1095 	cmd->error = 0;
1096 
1097 	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1098 	    cmd->opcode == MMC_STOP_TRANSMISSION)
1099 		cmd->flags |= MMC_RSP_BUSY;
1100 
1101 	/* Wait max 10 ms */
1102 	timeout = 10;
1103 
1104 	mask = SDHCI_CMD_INHIBIT;
1105 	if (sdhci_data_line_cmd(cmd))
1106 		mask |= SDHCI_DATA_INHIBIT;
1107 
1108 	/* We shouldn't wait for data inhibit for stop commands, even
1109 	   though they might use busy signaling */
1110 	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1111 		mask &= ~SDHCI_DATA_INHIBIT;
1112 
1113 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1114 		if (timeout == 0) {
1115 			pr_err("%s: Controller never released inhibit bit(s).\n",
1116 			       mmc_hostname(host->mmc));
1117 			sdhci_dumpregs(host);
1118 			cmd->error = -EIO;
1119 			sdhci_finish_mrq(host, cmd->mrq);
1120 			return;
1121 		}
1122 		timeout--;
1123 		mdelay(1);
1124 	}
1125 
1126 	timeout = jiffies;
1127 	if (!cmd->data && cmd->busy_timeout > 9000)
1128 		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1129 	else
1130 		timeout += 10 * HZ;
1131 	sdhci_mod_timer(host, cmd->mrq, timeout);
1132 
1133 	host->cmd = cmd;
1134 	if (sdhci_data_line_cmd(cmd)) {
1135 		WARN_ON(host->data_cmd);
1136 		host->data_cmd = cmd;
1137 	}
1138 
1139 	sdhci_prepare_data(host, cmd);
1140 
1141 	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1142 
1143 	sdhci_set_transfer_mode(host, cmd);
1144 
1145 	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1146 		pr_err("%s: Unsupported response type!\n",
1147 			mmc_hostname(host->mmc));
1148 		cmd->error = -EINVAL;
1149 		sdhci_finish_mrq(host, cmd->mrq);
1150 		return;
1151 	}
1152 
1153 	if (!(cmd->flags & MMC_RSP_PRESENT))
1154 		flags = SDHCI_CMD_RESP_NONE;
1155 	else if (cmd->flags & MMC_RSP_136)
1156 		flags = SDHCI_CMD_RESP_LONG;
1157 	else if (cmd->flags & MMC_RSP_BUSY)
1158 		flags = SDHCI_CMD_RESP_SHORT_BUSY;
1159 	else
1160 		flags = SDHCI_CMD_RESP_SHORT;
1161 
1162 	if (cmd->flags & MMC_RSP_CRC)
1163 		flags |= SDHCI_CMD_CRC;
1164 	if (cmd->flags & MMC_RSP_OPCODE)
1165 		flags |= SDHCI_CMD_INDEX;
1166 
1167 	/* CMD19 is special in that the Data Present Select should be set */
1168 	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1169 	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1170 		flags |= SDHCI_CMD_DATA;
1171 
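	/*
	 * Example: CMD17 (READ_SINGLE_BLOCK) carries an R1 response and
	 * data, so it is issued as SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC |
	 * SDHCI_CMD_INDEX | SDHCI_CMD_DATA.
	 */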
1172 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1173 }
1174 EXPORT_SYMBOL_GPL(sdhci_send_command);
1175 
1176 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1177 {
1178 	int i, reg;
1179 
1180 	for (i = 0; i < 4; i++) {
1181 		reg = SDHCI_RESPONSE + (3 - i) * 4;
1182 		cmd->resp[i] = sdhci_readl(host, reg);
1183 	}
1184 
1185 	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1186 		return;
1187 
1188 	/* CRC is stripped so we need to do some shifting */
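	/*
	 * The controller stores bits 127:8 of the response, so each word
	 * is shifted up by one byte and topped up with the high byte of
	 * the next word to rebuild the 128-bit layout the core expects.
	 */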
1189 	for (i = 0; i < 4; i++) {
1190 		cmd->resp[i] <<= 8;
1191 		if (i != 3)
1192 			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1193 	}
1194 }
1195 
1196 static void sdhci_finish_command(struct sdhci_host *host)
1197 {
1198 	struct mmc_command *cmd = host->cmd;
1199 
1200 	host->cmd = NULL;
1201 
1202 	if (cmd->flags & MMC_RSP_PRESENT) {
1203 		if (cmd->flags & MMC_RSP_136) {
1204 			sdhci_read_rsp_136(host, cmd);
1205 		} else {
1206 			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1207 		}
1208 	}
1209 
1210 	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1211 		mmc_command_done(host->mmc, cmd->mrq);
1212 
1213 	/*
1214 	 * The host can send an interrupt when the busy state has
1215 	 * ended, allowing us to wait without wasting CPU cycles.
1216 	 * The busy signal uses DAT0 so this is similar to waiting
1217 	 * for data to complete.
1218 	 *
1219 	 * Note: The 1.0 specification is a bit ambiguous about this
1220 	 *       feature so there might be some problems with older
1221 	 *       controllers.
1222 	 */
1223 	if (cmd->flags & MMC_RSP_BUSY) {
1224 		if (cmd->data) {
1225 			DBG("Cannot wait for busy signal when also doing a data transfer");
1226 		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1227 			   cmd == host->data_cmd) {
1228 			/* Command complete before busy is ended */
1229 			return;
1230 		}
1231 	}
1232 
1233 	/* Finished CMD23, now send actual command. */
1234 	if (cmd == cmd->mrq->sbc) {
1235 		sdhci_send_command(host, cmd->mrq->cmd);
1236 	} else {
1237 
1238 		/* Processed actual command. */
1239 		if (host->data && host->data_early)
1240 			sdhci_finish_data(host);
1241 
1242 		if (!cmd->data)
1243 			sdhci_finish_mrq(host, cmd->mrq);
1244 	}
1245 }
1246 
1247 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1248 {
1249 	u16 preset = 0;
1250 
1251 	switch (host->timing) {
1252 	case MMC_TIMING_UHS_SDR12:
1253 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1254 		break;
1255 	case MMC_TIMING_UHS_SDR25:
1256 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1257 		break;
1258 	case MMC_TIMING_UHS_SDR50:
1259 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1260 		break;
1261 	case MMC_TIMING_UHS_SDR104:
1262 	case MMC_TIMING_MMC_HS200:
1263 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1264 		break;
1265 	case MMC_TIMING_UHS_DDR50:
1266 	case MMC_TIMING_MMC_DDR52:
1267 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1268 		break;
1269 	case MMC_TIMING_MMC_HS400:
1270 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1271 		break;
1272 	default:
1273 		pr_warn("%s: Invalid UHS-I mode selected\n",
1274 			mmc_hostname(host->mmc));
1275 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1276 		break;
1277 	}
1278 	return preset;
1279 }
1280 
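/*
 * Worked example (illustrative values): on a v3.00 host with
 * host->max_clk = 200 MHz, no programmable clock, and a 50 MHz request,
 * the divisor loop picks div = 4, so real_div = 4, *actual_clock =
 * 50 MHz, and div >> 1 = 2 is encoded into the clock register.
 */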
1281 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1282 		   unsigned int *actual_clock)
1283 {
1284 	int div = 0; /* Initialized for compiler warning */
1285 	int real_div = div, clk_mul = 1;
1286 	u16 clk = 0;
1287 	bool switch_base_clk = false;
1288 
1289 	if (host->version >= SDHCI_SPEC_300) {
1290 		if (host->preset_enabled) {
1291 			u16 pre_val;
1292 
1293 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1294 			pre_val = sdhci_get_preset_value(host);
1295 			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1296 				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1297 			if (host->clk_mul &&
1298 				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1299 				clk = SDHCI_PROG_CLOCK_MODE;
1300 				real_div = div + 1;
1301 				clk_mul = host->clk_mul;
1302 			} else {
1303 				real_div = max_t(int, 1, div << 1);
1304 			}
1305 			goto clock_set;
1306 		}
1307 
1308 		/*
1309 		 * Check if the Host Controller supports Programmable Clock
1310 		 * Mode.
1311 		 */
1312 		if (host->clk_mul) {
1313 			for (div = 1; div <= 1024; div++) {
1314 				if ((host->max_clk * host->clk_mul / div)
1315 					<= clock)
1316 					break;
1317 			}
1318 			if ((host->max_clk * host->clk_mul / div) <= clock) {
1319 				/*
1320 				 * Set Programmable Clock Mode in the Clock
1321 				 * Control register.
1322 				 */
1323 				clk = SDHCI_PROG_CLOCK_MODE;
1324 				real_div = div;
1325 				clk_mul = host->clk_mul;
1326 				div--;
1327 			} else {
1328 				/*
1329 				 * The divisor may be too small to reach the clock
1330 				 * speed requirement, so use the base clock instead.
1331 				 */
1332 				switch_base_clk = true;
1333 			}
1334 		}
1335 
1336 		if (!host->clk_mul || switch_base_clk) {
1337 			/* Version 3.00 divisors must be a multiple of 2. */
1338 			if (host->max_clk <= clock)
1339 				div = 1;
1340 			else {
1341 				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1342 				     div += 2) {
1343 					if ((host->max_clk / div) <= clock)
1344 						break;
1345 				}
1346 			}
1347 			real_div = div;
1348 			div >>= 1;
1349 			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1350 				&& !div && host->max_clk <= 25000000)
1351 				div = 1;
1352 		}
1353 	} else {
1354 		/* Version 2.00 divisors must be a power of 2. */
1355 		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1356 			if ((host->max_clk / div) <= clock)
1357 				break;
1358 		}
1359 		real_div = div;
1360 		div >>= 1;
1361 	}
1362 
1363 clock_set:
1364 	if (real_div)
1365 		*actual_clock = (host->max_clk * clk_mul) / real_div;
1366 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1367 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1368 		<< SDHCI_DIVIDER_HI_SHIFT;
1369 
1370 	return clk;
1371 }
1372 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1373 
1374 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1375 {
1376 	ktime_t timeout;
1377 
1378 	clk |= SDHCI_CLOCK_INT_EN;
1379 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1380 
1381 	/* Wait max 20 ms */
1382 	timeout = ktime_add_ms(ktime_get(), 20);
1383 	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1384 		& SDHCI_CLOCK_INT_STABLE)) {
1385 		if (ktime_after(ktime_get(), timeout)) {
1386 			pr_err("%s: Internal clock never stabilised.\n",
1387 			       mmc_hostname(host->mmc));
1388 			sdhci_dumpregs(host);
1389 			return;
1390 		}
1391 		udelay(10);
1392 	}
1393 
1394 	clk |= SDHCI_CLOCK_CARD_EN;
1395 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1396 }
1397 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1398 
1399 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1400 {
1401 	u16 clk;
1402 
1403 	host->mmc->actual_clock = 0;
1404 
1405 	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1406 
1407 	if (clock == 0)
1408 		return;
1409 
1410 	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1411 	sdhci_enable_clk(host, clk);
1412 }
1413 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1414 
1415 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1416 				unsigned short vdd)
1417 {
1418 	struct mmc_host *mmc = host->mmc;
1419 
1420 	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1421 
1422 	if (mode != MMC_POWER_OFF)
1423 		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1424 	else
1425 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1426 }
1427 
1428 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1429 			   unsigned short vdd)
1430 {
1431 	u8 pwr = 0;
1432 
1433 	if (mode != MMC_POWER_OFF) {
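		/*
		 * vdd is the bit number of the selected MMC_VDD_* OCR
		 * range, e.g. vdd = 21 selects MMC_VDD_33_34.
		 */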
1434 		switch (1 << vdd) {
1435 		case MMC_VDD_165_195:
1436 			pwr = SDHCI_POWER_180;
1437 			break;
1438 		case MMC_VDD_29_30:
1439 		case MMC_VDD_30_31:
1440 			pwr = SDHCI_POWER_300;
1441 			break;
1442 		case MMC_VDD_32_33:
1443 		case MMC_VDD_33_34:
1444 			pwr = SDHCI_POWER_330;
1445 			break;
1446 		default:
1447 			WARN(1, "%s: Invalid vdd %#x\n",
1448 			     mmc_hostname(host->mmc), vdd);
1449 			break;
1450 		}
1451 	}
1452 
1453 	if (host->pwr == pwr)
1454 		return;
1455 
1456 	host->pwr = pwr;
1457 
1458 	if (pwr == 0) {
1459 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1460 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1461 			sdhci_runtime_pm_bus_off(host);
1462 	} else {
1463 		/*
1464 		 * Spec says that we should clear the power reg before setting
1465 		 * a new value. Some controllers don't seem to like this though.
1466 		 */
1467 		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1468 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1469 
1470 		/*
1471 		 * At least the Marvell CaFe chip gets confused if we set the
1472 		 * voltage and set turn on power at the same time, so set the
1473 		 * voltage first.
1474 		 */
1475 		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1476 			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1477 
1478 		pwr |= SDHCI_POWER_ON;
1479 
1480 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1481 
1482 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1483 			sdhci_runtime_pm_bus_on(host);
1484 
1485 		/*
1486 		 * Some controllers need an extra 10ms delay before they
1487 		 * can apply the clock after applying power
1488 		 */
1489 		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1490 			mdelay(10);
1491 	}
1492 }
1493 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1494 
1495 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1496 		     unsigned short vdd)
1497 {
1498 	if (IS_ERR(host->mmc->supply.vmmc))
1499 		sdhci_set_power_noreg(host, mode, vdd);
1500 	else
1501 		sdhci_set_power_reg(host, mode, vdd);
1502 }
1503 EXPORT_SYMBOL_GPL(sdhci_set_power);
1504 
1505 /*****************************************************************************\
1506  *                                                                           *
1507  * MMC callbacks                                                             *
1508  *                                                                           *
1509 \*****************************************************************************/
1510 
1511 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1512 {
1513 	struct sdhci_host *host;
1514 	int present;
1515 	unsigned long flags;
1516 
1517 	host = mmc_priv(mmc);
1518 
1519 	/* Firstly check card presence */
1520 	present = mmc->ops->get_cd(mmc);
1521 
1522 	spin_lock_irqsave(&host->lock, flags);
1523 
1524 	sdhci_led_activate(host);
1525 
1526 	/*
1527 	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1528 	 * requests if Auto-CMD12 is enabled.
1529 	 */
1530 	if (sdhci_auto_cmd12(host, mrq)) {
1531 		if (mrq->stop) {
1532 			mrq->data->stop = NULL;
1533 			mrq->stop = NULL;
1534 		}
1535 	}
1536 
1537 	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1538 		mrq->cmd->error = -ENOMEDIUM;
1539 		sdhci_finish_mrq(host, mrq);
1540 	} else {
1541 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1542 			sdhci_send_command(host, mrq->sbc);
1543 		else
1544 			sdhci_send_command(host, mrq->cmd);
1545 	}
1546 
1547 	mmiowb();
1548 	spin_unlock_irqrestore(&host->lock, flags);
1549 }
1550 
1551 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1552 {
1553 	u8 ctrl;
1554 
1555 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1556 	if (width == MMC_BUS_WIDTH_8) {
1557 		ctrl &= ~SDHCI_CTRL_4BITBUS;
1558 		ctrl |= SDHCI_CTRL_8BITBUS;
1559 	} else {
1560 		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1561 			ctrl &= ~SDHCI_CTRL_8BITBUS;
1562 		if (width == MMC_BUS_WIDTH_4)
1563 			ctrl |= SDHCI_CTRL_4BITBUS;
1564 		else
1565 			ctrl &= ~SDHCI_CTRL_4BITBUS;
1566 	}
1567 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1568 }
1569 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1570 
1571 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1572 {
1573 	u16 ctrl_2;
1574 
1575 	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1576 	/* Select Bus Speed Mode for host */
1577 	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1578 	if ((timing == MMC_TIMING_MMC_HS200) ||
1579 	    (timing == MMC_TIMING_UHS_SDR104))
1580 		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1581 	else if (timing == MMC_TIMING_UHS_SDR12)
1582 		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1583 	else if (timing == MMC_TIMING_UHS_SDR25)
1584 		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1585 	else if (timing == MMC_TIMING_UHS_SDR50)
1586 		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1587 	else if ((timing == MMC_TIMING_UHS_DDR50) ||
1588 		 (timing == MMC_TIMING_MMC_DDR52))
1589 		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1590 	else if (timing == MMC_TIMING_MMC_HS400)
1591 		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1592 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1593 }
1594 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1595 
1596 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1597 {
1598 	struct sdhci_host *host = mmc_priv(mmc);
1599 	u8 ctrl;
1600 
1601 	if (ios->power_mode == MMC_POWER_UNDEFINED)
1602 		return;
1603 
1604 	if (host->flags & SDHCI_DEVICE_DEAD) {
1605 		if (!IS_ERR(mmc->supply.vmmc) &&
1606 		    ios->power_mode == MMC_POWER_OFF)
1607 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1608 		return;
1609 	}
1610 
1611 	/*
1612 	 * Reset the chip on each power off.
1613 	 * Should clear out any weird states.
1614 	 */
1615 	if (ios->power_mode == MMC_POWER_OFF) {
1616 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1617 		sdhci_reinit(host);
1618 	}
1619 
1620 	if (host->version >= SDHCI_SPEC_300 &&
1621 		(ios->power_mode == MMC_POWER_UP) &&
1622 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1623 		sdhci_enable_preset_value(host, false);
1624 
1625 	if (!ios->clock || ios->clock != host->clock) {
1626 		host->ops->set_clock(host, ios->clock);
1627 		host->clock = ios->clock;
1628 
1629 		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1630 		    host->clock) {
1631 			host->timeout_clk = host->mmc->actual_clock ?
1632 						host->mmc->actual_clock / 1000 :
1633 						host->clock / 1000;
1634 			host->mmc->max_busy_timeout =
1635 				host->ops->get_max_timeout_count ?
1636 				host->ops->get_max_timeout_count(host) :
1637 				1 << 27;
1638 			host->mmc->max_busy_timeout /= host->timeout_clk;
1639 		}
1640 	}
1641 
1642 	if (host->ops->set_power)
1643 		host->ops->set_power(host, ios->power_mode, ios->vdd);
1644 	else
1645 		sdhci_set_power(host, ios->power_mode, ios->vdd);
1646 
1647 	if (host->ops->platform_send_init_74_clocks)
1648 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1649 
1650 	host->ops->set_bus_width(host, ios->bus_width);
1651 
1652 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1653 
1654 	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1655 		if (ios->timing == MMC_TIMING_SD_HS ||
1656 		     ios->timing == MMC_TIMING_MMC_HS ||
1657 		     ios->timing == MMC_TIMING_MMC_HS400 ||
1658 		     ios->timing == MMC_TIMING_MMC_HS200 ||
1659 		     ios->timing == MMC_TIMING_MMC_DDR52 ||
1660 		     ios->timing == MMC_TIMING_UHS_SDR50 ||
1661 		     ios->timing == MMC_TIMING_UHS_SDR104 ||
1662 		     ios->timing == MMC_TIMING_UHS_DDR50 ||
1663 		     ios->timing == MMC_TIMING_UHS_SDR25)
1664 			ctrl |= SDHCI_CTRL_HISPD;
1665 		else
1666 			ctrl &= ~SDHCI_CTRL_HISPD;
1667 	}
1668 
1669 	if (host->version >= SDHCI_SPEC_300) {
1670 		u16 clk, ctrl_2;
1671 
1672 		if (!host->preset_enabled) {
1673 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1674 			/*
1675 			 * We only need to set Driver Strength if the
1676 			 * preset value enable is not set.
1677 			 */
1678 			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1679 			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1680 			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1681 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1682 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1683 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1684 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1685 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1686 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1687 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1688 			else {
1689 				pr_warn("%s: invalid driver type, default to driver type B\n",
1690 					mmc_hostname(mmc));
1691 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1692 			}
1693 
1694 			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1695 		} else {
1696 			/*
1697 			 * According to SDHC Spec v3.00, if the Preset Value
1698 			 * Enable in the Host Control 2 register is set, we
1699 			 * need to reset SD Clock Enable before changing High
1700 			 * Speed Enable to avoid generating clock gliches.
1701 			 * Speed Enable to avoid generating clock glitches.
1702 
1703 			/* Reset SD Clock Enable */
1704 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1705 			clk &= ~SDHCI_CLOCK_CARD_EN;
1706 			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1707 
1708 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1709 
1710 			/* Re-enable SD Clock */
1711 			host->ops->set_clock(host, host->clock);
1712 		}
1713 
1714 		/* Reset SD Clock Enable */
1715 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1716 		clk &= ~SDHCI_CLOCK_CARD_EN;
1717 		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1718 
1719 		host->ops->set_uhs_signaling(host, ios->timing);
1720 		host->timing = ios->timing;
1721 
1722 		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1723 				((ios->timing == MMC_TIMING_UHS_SDR12) ||
1724 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1725 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1726 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1727 				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1728 				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1729 			u16 preset;
1730 
1731 			sdhci_enable_preset_value(host, true);
1732 			preset = sdhci_get_preset_value(host);
1733 			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1734 				>> SDHCI_PRESET_DRV_SHIFT;
1735 		}
1736 
1737 		/* Re-enable SD Clock */
1738 		host->ops->set_clock(host, host->clock);
1739 	} else
1740 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1741 
1742 	/*
1743 	 * Some (ENE) controllers misbehave on certain ios operations,
1744 	 * signalling timeout and CRC errors even on CMD0. Resetting
1745 	 * it on each ios seems to solve the problem.
1746 	 */
1747 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1748 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1749 
1750 	mmiowb();
1751 }
1752 EXPORT_SYMBOL_GPL(sdhci_set_ios);
1753 
1754 static int sdhci_get_cd(struct mmc_host *mmc)
1755 {
1756 	struct sdhci_host *host = mmc_priv(mmc);
1757 	int gpio_cd = mmc_gpio_get_cd(mmc);
1758 
1759 	if (host->flags & SDHCI_DEVICE_DEAD)
1760 		return 0;
1761 
1762 	/* If nonremovable, assume that the card is always present. */
1763 	if (!mmc_card_is_removable(host->mmc))
1764 		return 1;
1765 
1766 	/*
1767 	 * Try slot gpio detect; if defined, it takes precedence
1768 	 * over the built-in controller functionality
1769 	 */
1770 	if (gpio_cd >= 0)
1771 		return !!gpio_cd;
1772 
1773 	/* If polling, assume that the card is always present. */
1774 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1775 		return 1;
1776 
1777 	/* Host native card detect */
1778 	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1779 }
1780 
1781 static int sdhci_check_ro(struct sdhci_host *host)
1782 {
1783 	unsigned long flags;
1784 	int is_readonly;
1785 
1786 	spin_lock_irqsave(&host->lock, flags);
1787 
1788 	if (host->flags & SDHCI_DEVICE_DEAD)
1789 		is_readonly = 0;
1790 	else if (host->ops->get_ro)
1791 		is_readonly = host->ops->get_ro(host);
1792 	else
1793 		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1794 				& SDHCI_WRITE_PROTECT);
1795 
1796 	spin_unlock_irqrestore(&host->lock, flags);
1797 
1798 	/* This quirk needs to be replaced by a callback-function later */
1799 	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1800 		!is_readonly : is_readonly;
1801 }
1802 
1803 #define SAMPLE_COUNT	5
1804 
1805 static int sdhci_get_ro(struct mmc_host *mmc)
1806 {
1807 	struct sdhci_host *host = mmc_priv(mmc);
1808 	int i, ro_count;
1809 
1810 	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1811 		return sdhci_check_ro(host);
1812 
1813 	ro_count = 0;
1814 	for (i = 0; i < SAMPLE_COUNT; i++) {
1815 		if (sdhci_check_ro(host)) {
1816 			if (++ro_count > SAMPLE_COUNT / 2)
1817 				return 1;
1818 		}
1819 		msleep(30);
1820 	}
1821 	return 0;
1822 }
1823 
1824 static void sdhci_hw_reset(struct mmc_host *mmc)
1825 {
1826 	struct sdhci_host *host = mmc_priv(mmc);
1827 
1828 	if (host->ops && host->ops->hw_reset)
1829 		host->ops->hw_reset(host);
1830 }
1831 
1832 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1833 {
1834 	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1835 		if (enable)
1836 			host->ier |= SDHCI_INT_CARD_INT;
1837 		else
1838 			host->ier &= ~SDHCI_INT_CARD_INT;
1839 
1840 		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1841 		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1842 		mmiowb();
1843 	}
1844 }
1845 
1846 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1847 {
1848 	struct sdhci_host *host = mmc_priv(mmc);
1849 	unsigned long flags;
1850 
1851 	if (enable)
1852 		pm_runtime_get_noresume(host->mmc->parent);
1853 
1854 	spin_lock_irqsave(&host->lock, flags);
1855 	if (enable)
1856 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1857 	else
1858 		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1859 
1860 	sdhci_enable_sdio_irq_nolock(host, enable);
1861 	spin_unlock_irqrestore(&host->lock, flags);
1862 
1863 	if (!enable)
1864 		pm_runtime_put_noidle(host->mmc->parent);
1865 }
1866 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
1867 
1868 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1869 				      struct mmc_ios *ios)
1870 {
1871 	struct sdhci_host *host = mmc_priv(mmc);
1872 	u16 ctrl;
1873 	int ret;
1874 
1875 	/*
1876 	 * Signal Voltage Switching is only applicable for Host Controllers
1877 	 * v3.00 and above.
1878 	 */
1879 	if (host->version < SDHCI_SPEC_300)
1880 		return 0;
1881 
1882 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1883 
1884 	switch (ios->signal_voltage) {
1885 	case MMC_SIGNAL_VOLTAGE_330:
1886 		if (!(host->flags & SDHCI_SIGNALING_330))
1887 			return -EINVAL;
1888 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1889 		ctrl &= ~SDHCI_CTRL_VDD_180;
1890 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1891 
1892 		if (!IS_ERR(mmc->supply.vqmmc)) {
1893 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1894 			if (ret) {
1895 				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1896 					mmc_hostname(mmc));
1897 				return -EIO;
1898 			}
1899 		}
1900 		/* Wait for 5ms */
1901 		usleep_range(5000, 5500);
1902 
1903 		/* 3.3V regulator output should be stable within 5 ms */
1904 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1905 		if (!(ctrl & SDHCI_CTRL_VDD_180))
1906 			return 0;
1907 
		pr_warn("%s: 3.3V regulator output did not become stable\n",
1909 			mmc_hostname(mmc));
1910 
1911 		return -EAGAIN;
1912 	case MMC_SIGNAL_VOLTAGE_180:
1913 		if (!(host->flags & SDHCI_SIGNALING_180))
1914 			return -EINVAL;
1915 		if (!IS_ERR(mmc->supply.vqmmc)) {
1916 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1917 			if (ret) {
1918 				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1919 					mmc_hostname(mmc));
1920 				return -EIO;
1921 			}
1922 		}
1923 
1924 		/*
1925 		 * Enable 1.8V Signal Enable in the Host Control2
1926 		 * register
1927 		 */
1928 		ctrl |= SDHCI_CTRL_VDD_180;
1929 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1930 
		/* Some controllers need to do more when switching */
1932 		if (host->ops->voltage_switch)
1933 			host->ops->voltage_switch(host);
1934 
1935 		/* 1.8V regulator output should be stable within 5 ms */
1936 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1937 		if (ctrl & SDHCI_CTRL_VDD_180)
1938 			return 0;
1939 
		pr_warn("%s: 1.8V regulator output did not become stable\n",
1941 			mmc_hostname(mmc));
1942 
1943 		return -EAGAIN;
1944 	case MMC_SIGNAL_VOLTAGE_120:
1945 		if (!(host->flags & SDHCI_SIGNALING_120))
1946 			return -EINVAL;
1947 		if (!IS_ERR(mmc->supply.vqmmc)) {
1948 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1949 			if (ret) {
1950 				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1951 					mmc_hostname(mmc));
1952 				return -EIO;
1953 			}
1954 		}
1955 		return 0;
1956 	default:
1957 		/* No signal voltage switch required */
1958 		return 0;
1959 	}
1960 }
1961 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
1962 
1963 static int sdhci_card_busy(struct mmc_host *mmc)
1964 {
1965 	struct sdhci_host *host = mmc_priv(mmc);
1966 	u32 present_state;
1967 
1968 	/* Check whether DAT[0] is 0 */
1969 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1970 
1971 	return !(present_state & SDHCI_DATA_0_LVL_MASK);
1972 }
1973 
1974 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1975 {
1976 	struct sdhci_host *host = mmc_priv(mmc);
1977 	unsigned long flags;
1978 
1979 	spin_lock_irqsave(&host->lock, flags);
1980 	host->flags |= SDHCI_HS400_TUNING;
1981 	spin_unlock_irqrestore(&host->lock, flags);
1982 
1983 	return 0;
1984 }
1985 
1986 static void sdhci_start_tuning(struct sdhci_host *host)
1987 {
1988 	u16 ctrl;
1989 
1990 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1991 	ctrl |= SDHCI_CTRL_EXEC_TUNING;
1992 	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
1993 		ctrl |= SDHCI_CTRL_TUNED_CLK;
1994 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1995 
1996 	/*
1997 	 * As per the Host Controller spec v3.00, tuning command
1998 	 * generates Buffer Read Ready interrupt, so enable that.
1999 	 *
2000 	 * Note: The spec clearly says that when tuning sequence
2001 	 * is being performed, the controller does not generate
2002 	 * interrupts other than Buffer Read Ready interrupt. But
2003 	 * to make sure we don't hit a controller bug, we _only_
2004 	 * enable Buffer Read Ready interrupt here.
2005 	 */
2006 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2007 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2008 }
2009 
2010 static void sdhci_end_tuning(struct sdhci_host *host)
2011 {
2012 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2013 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2014 }
2015 
2016 static void sdhci_reset_tuning(struct sdhci_host *host)
2017 {
2018 	u16 ctrl;
2019 
2020 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2021 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2022 	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2023 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2024 }
2025 
2026 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2027 {
2028 	sdhci_reset_tuning(host);
2029 
2030 	sdhci_do_reset(host, SDHCI_RESET_CMD);
2031 	sdhci_do_reset(host, SDHCI_RESET_DATA);
2032 
2033 	sdhci_end_tuning(host);
2034 
2035 	mmc_abort_tuning(host->mmc, opcode);
2036 }
2037 
2038 /*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
 * SDHCI tuning command does not have a data payload (or rather the hardware
 * does it automatically) so mmc_send_tuning() would return -EIO. Also, the
 * tuning command's interrupt setup is different from that of other commands,
 * and there is no timeout interrupt, so special handling is needed.
2044  */
2045 static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2046 {
2047 	struct mmc_host *mmc = host->mmc;
2048 	struct mmc_command cmd = {};
2049 	struct mmc_request mrq = {};
2050 	unsigned long flags;
2051 	u32 b = host->sdma_boundary;
2052 
2053 	spin_lock_irqsave(&host->lock, flags);
2054 
2055 	cmd.opcode = opcode;
2056 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2057 	cmd.mrq = &mrq;
2058 
2059 	mrq.cmd = &cmd;
2060 	/*
	 * In response to CMD19, the card sends a 64-byte tuning
	 * block to the Host Controller, so set the block size to 64.
	 * For CMD21 on an 8-bit bus, the tuning block is 128 bytes.
2064 	 */
2065 	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2066 	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2067 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2068 	else
2069 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2070 
2071 	/*
2072 	 * The tuning block is sent by the card to the host controller.
2073 	 * So we set the TRNS_READ bit in the Transfer Mode register.
2074 	 * This also takes care of setting DMA Enable and Multi Block
2075 	 * Select in the same register to 0.
2076 	 */
2077 	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2078 
2079 	sdhci_send_command(host, &cmd);
2080 
2081 	host->cmd = NULL;
2082 
2083 	sdhci_del_timer(host, &mrq);
2084 
2085 	host->tuning_done = 0;
2086 
2087 	mmiowb();
2088 	spin_unlock_irqrestore(&host->lock, flags);
2089 
2090 	/* Wait for Buffer Read Ready interrupt */
2091 	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2092 			   msecs_to_jiffies(50));
2094 }
2095 
2096 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2097 {
2098 	int i;
2099 
2100 	/*
	 * Issue the tuning command repeatedly until the Execute Tuning bit
	 * is cleared or the number of loops reaches MAX_TUNING_LOOP (40).
2103 	 */
2104 	for (i = 0; i < MAX_TUNING_LOOP; i++) {
2105 		u16 ctrl;
2106 
2107 		sdhci_send_tuning(host, opcode);
2108 
2109 		if (!host->tuning_done) {
2110 			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2111 				mmc_hostname(host->mmc));
2112 			sdhci_abort_tuning(host, opcode);
2113 			return;
2114 		}
2115 
2116 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2117 		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2118 			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2119 				return; /* Success! */
2120 			break;
2121 		}
2122 
2123 		/* Spec does not require a delay between tuning cycles */
2124 		if (host->tuning_delay > 0)
2125 			mdelay(host->tuning_delay);
2126 	}
2127 
2128 	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2129 		mmc_hostname(host->mmc));
2130 	sdhci_reset_tuning(host);
2131 }
2132 
2133 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2134 {
2135 	struct sdhci_host *host = mmc_priv(mmc);
2136 	int err = 0;
2137 	unsigned int tuning_count = 0;
2138 	bool hs400_tuning;
2139 
2140 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2141 
2142 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2143 		tuning_count = host->tuning_count;
2144 
2145 	/*
	 * The Host Controller needs tuning in SDR104 and DDR50 modes,
	 * and in SDR50 mode when "Use Tuning for SDR50" is set in the
	 * Capabilities register. If the Host Controller supports HS200
	 * mode, the tuning function also has to be executed.
2151 	 */
2152 	switch (host->timing) {
2153 	/* HS400 tuning is done in HS200 mode */
2154 	case MMC_TIMING_MMC_HS400:
2155 		err = -EINVAL;
2156 		goto out;
2157 
2158 	case MMC_TIMING_MMC_HS200:
2159 		/*
2160 		 * Periodic re-tuning for HS400 is not expected to be needed, so
2161 		 * disable it here.
2162 		 */
2163 		if (hs400_tuning)
2164 			tuning_count = 0;
2165 		break;
2166 
2167 	case MMC_TIMING_UHS_SDR104:
2168 	case MMC_TIMING_UHS_DDR50:
2169 		break;
2170 
2171 	case MMC_TIMING_UHS_SDR50:
2172 		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2173 			break;
2174 		/* FALLTHROUGH */
2175 
2176 	default:
2177 		goto out;
2178 	}
2179 
2180 	if (host->ops->platform_execute_tuning) {
2181 		err = host->ops->platform_execute_tuning(host, opcode);
2182 		goto out;
2183 	}
2184 
2185 	host->mmc->retune_period = tuning_count;
2186 
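	/*
	 * A negative tuning_delay means it has not been set: default to
	 * a 1 ms delay between tuning cycles for CMD19 (SD) and no
	 * delay for other tuning commands.
	 */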
2187 	if (host->tuning_delay < 0)
2188 		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2189 
2190 	sdhci_start_tuning(host);
2191 
2192 	__sdhci_execute_tuning(host, opcode);
2193 
2194 	sdhci_end_tuning(host);
2195 out:
2196 	host->flags &= ~SDHCI_HS400_TUNING;
2197 
2198 	return err;
2199 }
2200 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2201 
2202 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2203 {
2204 	/* Host Controller v3.00 defines preset value registers */
2205 	if (host->version < SDHCI_SPEC_300)
2206 		return;
2207 
2208 	/*
	 * Only change the Preset Value Enable state when the requested
	 * state differs from the current one; otherwise there is nothing
	 * to do.
2211 	 */
2212 	if (host->preset_enabled != enable) {
2213 		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2214 
2215 		if (enable)
2216 			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2217 		else
2218 			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2219 
2220 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2221 
2222 		if (enable)
2223 			host->flags |= SDHCI_PV_ENABLED;
2224 		else
2225 			host->flags &= ~SDHCI_PV_ENABLED;
2226 
2227 		host->preset_enabled = enable;
2228 	}
2229 }
2230 
2231 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2232 				int err)
2233 {
2234 	struct sdhci_host *host = mmc_priv(mmc);
2235 	struct mmc_data *data = mrq->data;
2236 
2237 	if (data->host_cookie != COOKIE_UNMAPPED)
2238 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2239 			     mmc_get_dma_dir(data));
2240 
2241 	data->host_cookie = COOKIE_UNMAPPED;
2242 }
2243 
2244 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2245 {
2246 	struct sdhci_host *host = mmc_priv(mmc);
2247 
2248 	mrq->data->host_cookie = COOKIE_UNMAPPED;
2249 
2250 	if (host->flags & SDHCI_REQ_USE_DMA)
2251 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2252 }
2253 
2254 static inline bool sdhci_has_requests(struct sdhci_host *host)
2255 {
2256 	return host->cmd || host->data_cmd;
2257 }
2258 
2259 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2260 {
2261 	if (host->data_cmd) {
2262 		host->data_cmd->error = err;
2263 		sdhci_finish_mrq(host, host->data_cmd->mrq);
2264 	}
2265 
2266 	if (host->cmd) {
2267 		host->cmd->error = err;
2268 		sdhci_finish_mrq(host, host->cmd->mrq);
2269 	}
2270 }
2271 
2272 static void sdhci_card_event(struct mmc_host *mmc)
2273 {
2274 	struct sdhci_host *host = mmc_priv(mmc);
2275 	unsigned long flags;
2276 	int present;
2277 
	/* First check if the client has provided its own card event handler */
2279 	if (host->ops->card_event)
2280 		host->ops->card_event(host);
2281 
2282 	present = mmc->ops->get_cd(mmc);
2283 
2284 	spin_lock_irqsave(&host->lock, flags);
2285 
2286 	/* Check sdhci_has_requests() first in case we are runtime suspended */
2287 	if (sdhci_has_requests(host) && !present) {
2288 		pr_err("%s: Card removed during transfer!\n",
2289 			mmc_hostname(host->mmc));
2290 		pr_err("%s: Resetting controller.\n",
2291 			mmc_hostname(host->mmc));
2292 
2293 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2294 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2295 
2296 		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2297 	}
2298 
2299 	spin_unlock_irqrestore(&host->lock, flags);
2300 }
2301 
2302 static const struct mmc_host_ops sdhci_ops = {
2303 	.request	= sdhci_request,
2304 	.post_req	= sdhci_post_req,
2305 	.pre_req	= sdhci_pre_req,
2306 	.set_ios	= sdhci_set_ios,
2307 	.get_cd		= sdhci_get_cd,
2308 	.get_ro		= sdhci_get_ro,
2309 	.hw_reset	= sdhci_hw_reset,
2310 	.enable_sdio_irq = sdhci_enable_sdio_irq,
2311 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2312 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2313 	.execute_tuning			= sdhci_execute_tuning,
2314 	.card_event			= sdhci_card_event,
2315 	.card_busy	= sdhci_card_busy,
2316 };
2317 
2318 /*****************************************************************************\
2319  *                                                                           *
2320  * Tasklets                                                                  *
2321  *                                                                           *
2322 \*****************************************************************************/
2323 
2324 static bool sdhci_request_done(struct sdhci_host *host)
2325 {
2326 	unsigned long flags;
2327 	struct mmc_request *mrq;
2328 	int i;
2329 
2330 	spin_lock_irqsave(&host->lock, flags);
2331 
2332 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2333 		mrq = host->mrqs_done[i];
2334 		if (mrq)
2335 			break;
2336 	}
2337 
2338 	if (!mrq) {
2339 		spin_unlock_irqrestore(&host->lock, flags);
2340 		return true;
2341 	}
2342 
2343 	sdhci_del_timer(host, mrq);
2344 
2345 	/*
2346 	 * Always unmap the data buffers if they were mapped by
2347 	 * sdhci_prepare_data() whenever we finish with a request.
2348 	 * This avoids leaking DMA mappings on error.
2349 	 */
2350 	if (host->flags & SDHCI_REQ_USE_DMA) {
2351 		struct mmc_data *data = mrq->data;
2352 
2353 		if (data && data->host_cookie == COOKIE_MAPPED) {
2354 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2355 				     mmc_get_dma_dir(data));
2356 			data->host_cookie = COOKIE_UNMAPPED;
2357 		}
2358 	}
2359 
2360 	/*
2361 	 * The controller needs a reset of internal state machines
2362 	 * upon error conditions.
2363 	 */
2364 	if (sdhci_needs_reset(host, mrq)) {
2365 		/*
2366 		 * Do not finish until command and data lines are available for
2367 		 * reset. Note there can only be one other mrq, so it cannot
2368 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2369 		 * would both be null.
2370 		 */
2371 		if (host->cmd || host->data_cmd) {
2372 			spin_unlock_irqrestore(&host->lock, flags);
2373 			return true;
2374 		}
2375 
2376 		/* Some controllers need this kick or reset won't work here */
2377 		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2378 			/* This is to force an update */
2379 			host->ops->set_clock(host, host->clock);
2380 
		/*
		 * The spec says we should do both at the same time, but
		 * Ricoh controllers do not like that.
		 */
2383 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2384 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2385 
2386 		host->pending_reset = false;
2387 	}
2388 
2389 	if (!sdhci_has_requests(host))
2390 		sdhci_led_deactivate(host);
2391 
2392 	host->mrqs_done[i] = NULL;
2393 
2394 	mmiowb();
2395 	spin_unlock_irqrestore(&host->lock, flags);
2396 
2397 	mmc_request_done(host->mmc, mrq);
2398 
2399 	return false;
2400 }
2401 
2402 static void sdhci_tasklet_finish(unsigned long param)
2403 {
2404 	struct sdhci_host *host = (struct sdhci_host *)param;
2405 
2406 	while (!sdhci_request_done(host))
2407 		;
2408 }
2409 
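/* Timeout handling for commands that do not use the DATA line */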
2410 static void sdhci_timeout_timer(unsigned long data)
2411 {
2412 	struct sdhci_host *host;
2413 	unsigned long flags;
2414 
2415 	host = (struct sdhci_host*)data;
2416 
2417 	spin_lock_irqsave(&host->lock, flags);
2418 
2419 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2420 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2421 		       mmc_hostname(host->mmc));
2422 		sdhci_dumpregs(host);
2423 
2424 		host->cmd->error = -ETIMEDOUT;
2425 		sdhci_finish_mrq(host, host->cmd->mrq);
2426 	}
2427 
2428 	mmiowb();
2429 	spin_unlock_irqrestore(&host->lock, flags);
2430 }
2431 
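/* Timeout handling for data transfers and commands that use the DATA line */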
2432 static void sdhci_timeout_data_timer(unsigned long data)
2433 {
2434 	struct sdhci_host *host;
2435 	unsigned long flags;
2436 
2437 	host = (struct sdhci_host *)data;
2438 
2439 	spin_lock_irqsave(&host->lock, flags);
2440 
2441 	if (host->data || host->data_cmd ||
2442 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2443 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2444 		       mmc_hostname(host->mmc));
2445 		sdhci_dumpregs(host);
2446 
2447 		if (host->data) {
2448 			host->data->error = -ETIMEDOUT;
2449 			sdhci_finish_data(host);
2450 		} else if (host->data_cmd) {
2451 			host->data_cmd->error = -ETIMEDOUT;
2452 			sdhci_finish_mrq(host, host->data_cmd->mrq);
2453 		} else {
2454 			host->cmd->error = -ETIMEDOUT;
2455 			sdhci_finish_mrq(host, host->cmd->mrq);
2456 		}
2457 	}
2458 
2459 	mmiowb();
2460 	spin_unlock_irqrestore(&host->lock, flags);
2461 }
2462 
2463 /*****************************************************************************\
2464  *                                                                           *
2465  * Interrupt handling                                                        *
2466  *                                                                           *
2467 \*****************************************************************************/
2468 
2469 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2470 {
2471 	if (!host->cmd) {
2472 		/*
2473 		 * SDHCI recovers from errors by resetting the cmd and data
2474 		 * circuits.  Until that is done, there very well might be more
2475 		 * interrupts, so ignore them in that case.
2476 		 */
2477 		if (host->pending_reset)
2478 			return;
2479 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2480 		       mmc_hostname(host->mmc), (unsigned)intmask);
2481 		sdhci_dumpregs(host);
2482 		return;
2483 	}
2484 
2485 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2486 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2487 		if (intmask & SDHCI_INT_TIMEOUT)
2488 			host->cmd->error = -ETIMEDOUT;
2489 		else
2490 			host->cmd->error = -EILSEQ;
2491 
2492 		/*
2493 		 * If this command initiates a data phase and a response
2494 		 * CRC error is signalled, the card can start transferring
2495 		 * data - the card may have received the command without
2496 		 * error.  We must not terminate the mmc_request early.
2497 		 *
2498 		 * If the card did not receive the command or returned an
2499 		 * error which prevented it sending data, the data phase
2500 		 * will time out.
2501 		 */
2502 		if (host->cmd->data &&
2503 		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2504 		     SDHCI_INT_CRC) {
2505 			host->cmd = NULL;
2506 			return;
2507 		}
2508 
2509 		sdhci_finish_mrq(host, host->cmd->mrq);
2510 		return;
2511 	}
2512 
2513 	if (intmask & SDHCI_INT_RESPONSE)
2514 		sdhci_finish_command(host);
2515 }
2516 
2517 static void sdhci_adma_show_error(struct sdhci_host *host)
2518 {
2519 	void *desc = host->adma_table;
2520 
2521 	sdhci_dumpregs(host);
2522 
2523 	while (true) {
2524 		struct sdhci_adma2_64_desc *dma_desc = desc;
2525 
2526 		if (host->flags & SDHCI_USE_64_BIT_DMA)
2527 			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2528 			    desc, le32_to_cpu(dma_desc->addr_hi),
2529 			    le32_to_cpu(dma_desc->addr_lo),
2530 			    le16_to_cpu(dma_desc->len),
2531 			    le16_to_cpu(dma_desc->cmd));
2532 		else
2533 			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2534 			    desc, le32_to_cpu(dma_desc->addr_lo),
2535 			    le16_to_cpu(dma_desc->len),
2536 			    le16_to_cpu(dma_desc->cmd));
2537 
2538 		desc += host->desc_sz;
2539 
2540 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2541 			break;
2542 	}
2543 }
2544 
2545 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2546 {
2547 	u32 command;
2548 
2549 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2550 	if (intmask & SDHCI_INT_DATA_AVAIL) {
2551 		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2552 		if (command == MMC_SEND_TUNING_BLOCK ||
2553 		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2554 			host->tuning_done = 1;
2555 			wake_up(&host->buf_ready_int);
2556 			return;
2557 		}
2558 	}
2559 
2560 	if (!host->data) {
2561 		struct mmc_command *data_cmd = host->data_cmd;
2562 
2563 		/*
2564 		 * The "data complete" interrupt is also used to
2565 		 * indicate that a busy state has ended. See comment
2566 		 * above in sdhci_cmd_irq().
2567 		 */
2568 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2569 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2570 				host->data_cmd = NULL;
2571 				data_cmd->error = -ETIMEDOUT;
2572 				sdhci_finish_mrq(host, data_cmd->mrq);
2573 				return;
2574 			}
2575 			if (intmask & SDHCI_INT_DATA_END) {
2576 				host->data_cmd = NULL;
2577 				/*
2578 				 * Some cards handle busy-end interrupt
2579 				 * before the command completed, so make
2580 				 * sure we do things in the proper order.
2581 				 */
2582 				if (host->cmd == data_cmd)
2583 					return;
2584 
2585 				sdhci_finish_mrq(host, data_cmd->mrq);
2586 				return;
2587 			}
2588 		}
2589 
2590 		/*
2591 		 * SDHCI recovers from errors by resetting the cmd and data
2592 		 * circuits. Until that is done, there very well might be more
2593 		 * interrupts, so ignore them in that case.
2594 		 */
2595 		if (host->pending_reset)
2596 			return;
2597 
2598 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2599 		       mmc_hostname(host->mmc), (unsigned)intmask);
2600 		sdhci_dumpregs(host);
2601 
2602 		return;
2603 	}
2604 
2605 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2606 		host->data->error = -ETIMEDOUT;
2607 	else if (intmask & SDHCI_INT_DATA_END_BIT)
2608 		host->data->error = -EILSEQ;
2609 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2610 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2611 			!= MMC_BUS_TEST_R)
2612 		host->data->error = -EILSEQ;
2613 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2614 		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2615 		sdhci_adma_show_error(host);
2616 		host->data->error = -EIO;
2617 		if (host->ops->adma_workaround)
2618 			host->ops->adma_workaround(host, intmask);
2619 	}
2620 
2621 	if (host->data->error)
2622 		sdhci_finish_data(host);
2623 	else {
2624 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2625 			sdhci_transfer_pio(host);
2626 
2627 		/*
2628 		 * We currently don't do anything fancy with DMA
2629 		 * boundaries, but as we can't disable the feature
2630 		 * we need to at least restart the transfer.
2631 		 *
2632 		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2633 		 * should return a valid address to continue from, but as
2634 		 * some controllers are faulty, don't trust them.
2635 		 */
2636 		if (intmask & SDHCI_INT_DMA_END) {
2637 			u32 dmastart, dmanow;
2638 			dmastart = sg_dma_address(host->data->sg);
2639 			dmanow = dmastart + host->data->bytes_xfered;
2640 			/*
2641 			 * Force update to the next DMA block boundary.
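			 *
			 * For example, with the default 512 KiB boundary, a
			 * transfer interrupted at 0x12345678 is resumed at
			 * 0x12380000.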
2642 			 */
2643 			dmanow = (dmanow &
2644 				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2645 				SDHCI_DEFAULT_BOUNDARY_SIZE;
2646 			host->data->bytes_xfered = dmanow - dmastart;
2647 			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2648 			    dmastart, host->data->bytes_xfered, dmanow);
2649 			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2650 		}
2651 
2652 		if (intmask & SDHCI_INT_DATA_END) {
2653 			if (host->cmd == host->data_cmd) {
2654 				/*
2655 				 * Data managed to finish before the
2656 				 * command completed. Make sure we do
2657 				 * things in the proper order.
2658 				 */
2659 				host->data_early = 1;
2660 			} else {
2661 				sdhci_finish_data(host);
2662 			}
2663 		}
2664 	}
2665 }
2666 
2667 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2668 {
2669 	irqreturn_t result = IRQ_NONE;
2670 	struct sdhci_host *host = dev_id;
2671 	u32 intmask, mask, unexpected = 0;
2672 	int max_loops = 16;
2673 
2674 	spin_lock(&host->lock);
2675 
2676 	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2677 		spin_unlock(&host->lock);
2678 		return IRQ_NONE;
2679 	}
2680 
2681 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2682 	if (!intmask || intmask == 0xffffffff) {
2683 		result = IRQ_NONE;
2684 		goto out;
2685 	}
2686 
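	/*
	 * Handle the pending interrupts, then re-read the status and
	 * repeat while new interrupts keep arriving, bounded by max_loops.
	 */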
2687 	do {
2688 		DBG("IRQ status 0x%08x\n", intmask);
2689 
2690 		if (host->ops->irq) {
2691 			intmask = host->ops->irq(host, intmask);
2692 			if (!intmask)
2693 				goto cont;
2694 		}
2695 
2696 		/* Clear selected interrupts. */
2697 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2698 				  SDHCI_INT_BUS_POWER);
2699 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2700 
2701 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2702 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2703 				      SDHCI_CARD_PRESENT;
2704 
2705 			/*
			 * There is an observation on i.MX eSDHC: if a card
			 * is inserted, the INSERT bit is set again
			 * immediately after being cleared. We have to mask
			 * the IRQ to prevent an interrupt storm that would
			 * freeze the system, and the REMOVE bit behaves the
			 * same way.
			 *
			 * More testing is needed to ensure this works on
			 * other platforms, though.
2715 			 */
2716 			host->ier &= ~(SDHCI_INT_CARD_INSERT |
2717 				       SDHCI_INT_CARD_REMOVE);
2718 			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2719 					       SDHCI_INT_CARD_INSERT;
2720 			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2721 			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2722 
2723 			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2724 				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2725 
2726 			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2727 						       SDHCI_INT_CARD_REMOVE);
2728 			result = IRQ_WAKE_THREAD;
2729 		}
2730 
2731 		if (intmask & SDHCI_INT_CMD_MASK)
2732 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2733 
2734 		if (intmask & SDHCI_INT_DATA_MASK)
2735 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2736 
2737 		if (intmask & SDHCI_INT_BUS_POWER)
2738 			pr_err("%s: Card is consuming too much power!\n",
2739 				mmc_hostname(host->mmc));
2740 
2741 		if (intmask & SDHCI_INT_RETUNE)
2742 			mmc_retune_needed(host->mmc);
2743 
2744 		if ((intmask & SDHCI_INT_CARD_INT) &&
2745 		    (host->ier & SDHCI_INT_CARD_INT)) {
2746 			sdhci_enable_sdio_irq_nolock(host, false);
2747 			host->thread_isr |= SDHCI_INT_CARD_INT;
2748 			result = IRQ_WAKE_THREAD;
2749 		}
2750 
2751 		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2752 			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2753 			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2754 			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2755 
2756 		if (intmask) {
2757 			unexpected |= intmask;
2758 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2759 		}
2760 cont:
2761 		if (result == IRQ_NONE)
2762 			result = IRQ_HANDLED;
2763 
2764 		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2765 	} while (intmask && --max_loops);
2766 out:
2767 	spin_unlock(&host->lock);
2768 
2769 	if (unexpected) {
2770 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2771 			   mmc_hostname(host->mmc), unexpected);
2772 		sdhci_dumpregs(host);
2773 	}
2774 
2775 	return result;
2776 }
2777 
2778 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2779 {
2780 	struct sdhci_host *host = dev_id;
2781 	unsigned long flags;
2782 	u32 isr;
2783 
2784 	spin_lock_irqsave(&host->lock, flags);
2785 	isr = host->thread_isr;
2786 	host->thread_isr = 0;
2787 	spin_unlock_irqrestore(&host->lock, flags);
2788 
2789 	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2790 		struct mmc_host *mmc = host->mmc;
2791 
2792 		mmc->ops->card_event(mmc);
2793 		mmc_detect_change(mmc, msecs_to_jiffies(200));
2794 	}
2795 
2796 	if (isr & SDHCI_INT_CARD_INT) {
2797 		sdio_run_irqs(host->mmc);
2798 
2799 		spin_lock_irqsave(&host->lock, flags);
2800 		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2801 			sdhci_enable_sdio_irq_nolock(host, true);
2802 		spin_unlock_irqrestore(&host->lock, flags);
2803 	}
2804 
2805 	return isr ? IRQ_HANDLED : IRQ_NONE;
2806 }
2807 
2808 /*****************************************************************************\
2809  *                                                                           *
2810  * Suspend/resume                                                            *
2811  *                                                                           *
2812 \*****************************************************************************/
2813 
2814 #ifdef CONFIG_PM
2815 /*
2816  * To enable wakeup events, the corresponding events have to be enabled in
2817  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2818  * Table' in the SD Host Controller Standard Specification.
2819  * It is useless to restore SDHCI_INT_ENABLE state in
2820  * sdhci_disable_irq_wakeups() since it will be set by
2821  * sdhci_enable_card_detection() or sdhci_init().
2822  */
2823 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2824 {
2825 	u8 val;
2826 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2827 			| SDHCI_WAKE_ON_INT;
2828 	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2829 		      SDHCI_INT_CARD_INT;
2830 
2831 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid spurious wake-ups when card detection is unreliable */
2834 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
2835 		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2836 		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2837 	}
2838 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2839 	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
2840 }
2841 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2842 
2843 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2844 {
2845 	u8 val;
2846 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2847 			| SDHCI_WAKE_ON_INT;
2848 
2849 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2850 	val &= ~mask;
2851 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2852 }
2853 
2854 int sdhci_suspend_host(struct sdhci_host *host)
2855 {
2856 	sdhci_disable_card_detection(host);
2857 
2858 	mmc_retune_timer_stop(host->mmc);
2859 
2860 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2861 		host->ier = 0;
2862 		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2863 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2864 		free_irq(host->irq, host);
2865 	} else {
2866 		sdhci_enable_irq_wakeups(host);
2867 		enable_irq_wake(host->irq);
2868 	}
2869 	return 0;
2870 }
2872 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2873 
2874 int sdhci_resume_host(struct sdhci_host *host)
2875 {
2876 	struct mmc_host *mmc = host->mmc;
2877 	int ret = 0;
2878 
2879 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2880 		if (host->ops->enable_dma)
2881 			host->ops->enable_dma(host);
2882 	}
2883 
2884 	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2885 	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2886 		/* Card keeps power but host controller does not */
2887 		sdhci_init(host, 0);
2888 		host->pwr = 0;
2889 		host->clock = 0;
2890 		mmc->ops->set_ios(mmc, &mmc->ios);
2891 	} else {
2892 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2893 		mmiowb();
2894 	}
2895 
2896 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2897 		ret = request_threaded_irq(host->irq, sdhci_irq,
2898 					   sdhci_thread_irq, IRQF_SHARED,
2899 					   mmc_hostname(host->mmc), host);
2900 		if (ret)
2901 			return ret;
2902 	} else {
2903 		sdhci_disable_irq_wakeups(host);
2904 		disable_irq_wake(host->irq);
2905 	}
2906 
2907 	sdhci_enable_card_detection(host);
2908 
2909 	return ret;
2910 }
2912 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2913 
2914 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2915 {
2916 	unsigned long flags;
2917 
2918 	mmc_retune_timer_stop(host->mmc);
2919 
2920 	spin_lock_irqsave(&host->lock, flags);
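	/* Keep only the SDIO card interrupt enabled while runtime suspended */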
2921 	host->ier &= SDHCI_INT_CARD_INT;
2922 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2923 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2924 	spin_unlock_irqrestore(&host->lock, flags);
2925 
2926 	synchronize_hardirq(host->irq);
2927 
2928 	spin_lock_irqsave(&host->lock, flags);
2929 	host->runtime_suspended = true;
2930 	spin_unlock_irqrestore(&host->lock, flags);
2931 
2932 	return 0;
2933 }
2934 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2935 
2936 int sdhci_runtime_resume_host(struct sdhci_host *host)
2937 {
2938 	struct mmc_host *mmc = host->mmc;
2939 	unsigned long flags;
2940 	int host_flags = host->flags;
2941 
2942 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2943 		if (host->ops->enable_dma)
2944 			host->ops->enable_dma(host);
2945 	}
2946 
2947 	sdhci_init(host, 0);
2948 
2949 	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
2950 	    mmc->ios.power_mode != MMC_POWER_OFF) {
2951 		/* Force clock and power re-program */
2952 		host->pwr = 0;
2953 		host->clock = 0;
2954 		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
2955 		mmc->ops->set_ios(mmc, &mmc->ios);
2956 
2957 		if ((host_flags & SDHCI_PV_ENABLED) &&
2958 		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2959 			spin_lock_irqsave(&host->lock, flags);
2960 			sdhci_enable_preset_value(host, true);
2961 			spin_unlock_irqrestore(&host->lock, flags);
2962 		}
2963 
2964 		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
2965 		    mmc->ops->hs400_enhanced_strobe)
2966 			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
2967 	}
2968 
2969 	spin_lock_irqsave(&host->lock, flags);
2970 
2971 	host->runtime_suspended = false;
2972 
2973 	/* Enable SDIO IRQ */
2974 	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2975 		sdhci_enable_sdio_irq_nolock(host, true);
2976 
2977 	/* Enable Card Detection */
2978 	sdhci_enable_card_detection(host);
2979 
2980 	spin_unlock_irqrestore(&host->lock, flags);
2981 
2982 	return 0;
2983 }
2984 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2985 
2986 #endif /* CONFIG_PM */
2987 
2988 /*****************************************************************************\
2989  *                                                                           *
2990  * Command Queue Engine (CQE) helpers                                        *
2991  *                                                                           *
2992 \*****************************************************************************/
2993 
2994 void sdhci_cqe_enable(struct mmc_host *mmc)
2995 {
2996 	struct sdhci_host *host = mmc_priv(mmc);
2997 	unsigned long flags;
2998 	u8 ctrl;
2999 
3000 	spin_lock_irqsave(&host->lock, flags);
3001 
3002 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3003 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3004 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3005 		ctrl |= SDHCI_CTRL_ADMA64;
3006 	else
3007 		ctrl |= SDHCI_CTRL_ADMA32;
3008 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3009 
3010 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
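	/* CQE transfers use a fixed 512-byte block size */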
3011 		     SDHCI_BLOCK_SIZE);
3012 
3013 	/* Set maximum timeout */
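	/* (a Data Timeout Counter value of 0xE selects TMCLK * 2^27) */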
3014 	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3015 
3016 	host->ier = host->cqe_ier;
3017 
3018 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3019 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3020 
3021 	host->cqe_on = true;
3022 
3023 	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3024 		 mmc_hostname(mmc), host->ier,
3025 		 sdhci_readl(host, SDHCI_INT_STATUS));
3026 
3027 	mmiowb();
3028 	spin_unlock_irqrestore(&host->lock, flags);
3029 }
3030 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3031 
3032 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3033 {
3034 	struct sdhci_host *host = mmc_priv(mmc);
3035 	unsigned long flags;
3036 
3037 	spin_lock_irqsave(&host->lock, flags);
3038 
3039 	sdhci_set_default_irqs(host);
3040 
3041 	host->cqe_on = false;
3042 
3043 	if (recovery) {
3044 		sdhci_do_reset(host, SDHCI_RESET_CMD);
3045 		sdhci_do_reset(host, SDHCI_RESET_DATA);
3046 	}
3047 
3048 	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3049 		 mmc_hostname(mmc), host->ier,
3050 		 sdhci_readl(host, SDHCI_INT_STATUS));
3051 
3052 	mmiowb();
3053 	spin_unlock_irqrestore(&host->lock, flags);
3054 }
3055 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3056 
3057 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3058 		   int *data_error)
3059 {
3060 	u32 mask;
3061 
3062 	if (!host->cqe_on)
3063 		return false;
3064 
3065 	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3066 		*cmd_error = -EILSEQ;
3067 	else if (intmask & SDHCI_INT_TIMEOUT)
3068 		*cmd_error = -ETIMEDOUT;
3069 	else
3070 		*cmd_error = 0;
3071 
3072 	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3073 		*data_error = -EILSEQ;
3074 	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3075 		*data_error = -ETIMEDOUT;
3076 	else if (intmask & SDHCI_INT_ADMA_ERROR)
3077 		*data_error = -EIO;
3078 	else
3079 		*data_error = 0;
3080 
3081 	/* Clear selected interrupts. */
3082 	mask = intmask & host->cqe_ier;
3083 	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3084 
3085 	if (intmask & SDHCI_INT_BUS_POWER)
3086 		pr_err("%s: Card is consuming too much power!\n",
3087 		       mmc_hostname(host->mmc));
3088 
3089 	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3090 	if (intmask) {
3091 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3092 		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3093 		       mmc_hostname(host->mmc), intmask);
3094 		sdhci_dumpregs(host);
3095 	}
3096 
3097 	return true;
3098 }
3099 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3100 
3101 /*****************************************************************************\
3102  *                                                                           *
3103  * Device allocation/registration                                            *
3104  *                                                                           *
3105 \*****************************************************************************/
3106 
3107 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3108 	size_t priv_size)
3109 {
3110 	struct mmc_host *mmc;
3111 	struct sdhci_host *host;
3112 
3113 	WARN_ON(dev == NULL);
3114 
3115 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3116 	if (!mmc)
3117 		return ERR_PTR(-ENOMEM);
3118 
3119 	host = mmc_priv(mmc);
3120 	host->mmc = mmc;
3121 	host->mmc_host_ops = sdhci_ops;
3122 	mmc->ops = &host->mmc_host_ops;
3123 
3124 	host->flags = SDHCI_SIGNALING_330;
3125 
3126 	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3127 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3128 
3129 	host->tuning_delay = -1;
3130 
3131 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3132 
3133 	return host;
3134 }
3136 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3137 
3138 static int sdhci_set_dma_mask(struct sdhci_host *host)
3139 {
3140 	struct mmc_host *mmc = host->mmc;
3141 	struct device *dev = mmc_dev(mmc);
3142 	int ret = -EINVAL;
3143 
3144 	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3145 		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3146 
	/* Try 64-bit mask if hardware is capable of it */
3148 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3149 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3150 		if (ret) {
3151 			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3152 				mmc_hostname(mmc));
3153 			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3154 		}
3155 	}
3156 
3157 	/* 32-bit mask as default & fallback */
3158 	if (ret) {
3159 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3160 		if (ret)
3161 			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3162 				mmc_hostname(mmc));
3163 	}
3164 
3165 	return ret;
3166 }
3167 
3168 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3169 {
3170 	u16 v;
3171 	u64 dt_caps_mask = 0;
3172 	u64 dt_caps = 0;
3173 
3174 	if (host->read_caps)
3175 		return;
3176 
3177 	host->read_caps = true;
3178 
3179 	if (debug_quirks)
3180 		host->quirks = debug_quirks;
3181 
3182 	if (debug_quirks2)
3183 		host->quirks2 = debug_quirks2;
3184 
3185 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3186 
3187 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3188 			     "sdhci-caps-mask", &dt_caps_mask);
3189 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3190 			     "sdhci-caps", &dt_caps);
3191 
3192 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3193 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3194 
3195 	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3196 		return;
3197 
3198 	if (caps) {
3199 		host->caps = *caps;
3200 	} else {
3201 		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3202 		host->caps &= ~lower_32_bits(dt_caps_mask);
3203 		host->caps |= lower_32_bits(dt_caps);
3204 	}
3205 
3206 	if (host->version < SDHCI_SPEC_300)
3207 		return;
3208 
3209 	if (caps1) {
3210 		host->caps1 = *caps1;
3211 	} else {
3212 		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3213 		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3214 		host->caps1 |= upper_32_bits(dt_caps);
3215 	}
3216 }
3217 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3218 
3219 int sdhci_setup_host(struct sdhci_host *host)
3220 {
3221 	struct mmc_host *mmc;
3222 	u32 max_current_caps;
3223 	unsigned int ocr_avail;
3224 	unsigned int override_timeout_clk;
3225 	u32 max_clk;
3226 	int ret;
3227 
3228 	WARN_ON(host == NULL);
3229 	if (host == NULL)
3230 		return -EINVAL;
3231 
3232 	mmc = host->mmc;
3233 
3234 	/*
3235 	 * If there are external regulators, get them. Note this must be done
3236 	 * early before resetting the host and reading the capabilities so that
3237 	 * the host can take the appropriate action if regulators are not
3238 	 * available.
3239 	 */
3240 	ret = mmc_regulator_get_supply(mmc);
3241 	if (ret == -EPROBE_DEFER)
3242 		return ret;
3243 
3244 	DBG("Version:   0x%08x | Present:  0x%08x\n",
3245 	    sdhci_readw(host, SDHCI_HOST_VERSION),
3246 	    sdhci_readl(host, SDHCI_PRESENT_STATE));
3247 	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3248 	    sdhci_readl(host, SDHCI_CAPABILITIES),
3249 	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
3250 
3251 	sdhci_read_caps(host);
3252 
3253 	override_timeout_clk = host->timeout_clk;
3254 
3255 	if (host->version > SDHCI_SPEC_300) {
3256 		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3257 		       mmc_hostname(mmc), host->version);
3258 	}
3259 
3260 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3261 		host->flags |= SDHCI_USE_SDMA;
3262 	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3263 		DBG("Controller doesn't have SDMA capability\n");
3264 	else
3265 		host->flags |= SDHCI_USE_SDMA;
3266 
3267 	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3268 		(host->flags & SDHCI_USE_SDMA)) {
3269 		DBG("Disabling DMA as it is marked broken\n");
3270 		host->flags &= ~SDHCI_USE_SDMA;
3271 	}
3272 
3273 	if ((host->version >= SDHCI_SPEC_200) &&
3274 		(host->caps & SDHCI_CAN_DO_ADMA2))
3275 		host->flags |= SDHCI_USE_ADMA;
3276 
3277 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3278 		(host->flags & SDHCI_USE_ADMA)) {
3279 		DBG("Disabling ADMA as it is marked broken\n");
3280 		host->flags &= ~SDHCI_USE_ADMA;
3281 	}
3282 
3283 	/*
3284 	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3285 	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
3286 	 * that during the first call to ->enable_dma().  Similarly
3287 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3288 	 * implement.
3289 	 */
3290 	if (host->caps & SDHCI_CAN_64BIT)
3291 		host->flags |= SDHCI_USE_64_BIT_DMA;
3292 
3293 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3294 		ret = sdhci_set_dma_mask(host);
3295 
3296 		if (!ret && host->ops->enable_dma)
3297 			ret = host->ops->enable_dma(host);
3298 
3299 		if (ret) {
3300 			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3301 				mmc_hostname(mmc));
3302 			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3303 
3304 			ret = 0;
3305 		}
3306 	}
3307 
3308 	/* SDMA does not support 64-bit DMA */
3309 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3310 		host->flags &= ~SDHCI_USE_SDMA;
3311 
3312 	if (host->flags & SDHCI_USE_ADMA) {
3313 		dma_addr_t dma;
3314 		void *buf;
3315 
3316 		/*
3317 		 * The DMA descriptor table size is calculated as the maximum
3318 		 * number of segments times 2, to allow for an alignment
3319 		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
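		 *
		 * For example, with SDHCI_MAX_SEGS = 128 this gives
		 * (128 * 2 + 1) = 257 descriptors per table.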
3321 		 */
3322 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
3323 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3324 					      SDHCI_ADMA2_64_DESC_SZ;
3325 			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3326 		} else {
3327 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3328 					      SDHCI_ADMA2_32_DESC_SZ;
3329 			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3330 		}
3331 
3332 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3333 		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3334 					 host->adma_table_sz, &dma, GFP_KERNEL);
3335 		if (!buf) {
3336 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3337 				mmc_hostname(mmc));
3338 			host->flags &= ~SDHCI_USE_ADMA;
3339 		} else if ((dma + host->align_buffer_sz) &
3340 			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3341 			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3342 				mmc_hostname(mmc));
3343 			host->flags &= ~SDHCI_USE_ADMA;
3344 			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3345 					  host->adma_table_sz, buf, dma);
3346 		} else {
3347 			host->align_buffer = buf;
3348 			host->align_addr = dma;
3349 
3350 			host->adma_table = buf + host->align_buffer_sz;
3351 			host->adma_addr = dma + host->align_buffer_sz;
3352 		}
3353 	}
3354 
3355 	/*
3356 	 * If we use DMA, then it's up to the caller to set the DMA
3357 	 * mask, but PIO does not need the hw shim so we set a new
3358 	 * mask here in that case.
3359 	 */
3360 	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3361 		host->dma_mask = DMA_BIT_MASK(64);
3362 		mmc_dev(mmc)->dma_mask = &host->dma_mask;
3363 	}
3364 
3365 	if (host->version >= SDHCI_SPEC_300)
3366 		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3367 			>> SDHCI_CLOCK_BASE_SHIFT;
3368 	else
3369 		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3370 			>> SDHCI_CLOCK_BASE_SHIFT;
3371 
3372 	host->max_clk *= 1000000;
3373 	if (host->max_clk == 0 || host->quirks &
3374 			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3375 		if (!host->ops->get_max_clock) {
3376 			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3377 			       mmc_hostname(mmc));
3378 			ret = -ENODEV;
3379 			goto undma;
3380 		}
3381 		host->max_clk = host->ops->get_max_clock(host);
3382 	}
3383 
3384 	/*
3385 	 * In case of Host Controller v3.00, find out whether clock
3386 	 * multiplier is supported.
3387 	 */
3388 	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3389 			SDHCI_CLOCK_MUL_SHIFT;
3390 
3391 	/*
	 * If the value in Clock Multiplier is 0, programmable clock
	 * mode is not supported; otherwise the actual clock multiplier
	 * is one more than the value of Clock Multiplier in the
	 * Capabilities register.
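	 *
	 * For example, a Clock Multiplier field of 7 means the actual
	 * multiplier is 8.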
3396 	 */
3397 	if (host->clk_mul)
3398 		host->clk_mul += 1;
3399 
3400 	/*
3401 	 * Set host parameters.
3402 	 */
3403 	max_clk = host->max_clk;
3404 
3405 	if (host->ops->get_min_clock)
3406 		mmc->f_min = host->ops->get_min_clock(host);
3407 	else if (host->version >= SDHCI_SPEC_300) {
3408 		if (host->clk_mul) {
3409 			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3410 			max_clk = host->max_clk * host->clk_mul;
3411 		} else
3412 			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3413 	} else
3414 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3415 
3416 	if (!mmc->f_max || mmc->f_max > max_clk)
3417 		mmc->f_max = max_clk;
3418 
3419 	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3420 		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3421 					SDHCI_TIMEOUT_CLK_SHIFT;
3422 
3423 		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3424 			host->timeout_clk *= 1000;
3425 
3426 		if (host->timeout_clk == 0) {
3427 			if (!host->ops->get_timeout_clock) {
3428 				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3429 					mmc_hostname(mmc));
3430 				ret = -ENODEV;
3431 				goto undma;
3432 			}
3433 
3434 			host->timeout_clk =
3435 				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3436 					     1000);
3437 		}
3438 
3439 		if (override_timeout_clk)
3440 			host->timeout_clk = override_timeout_clk;
3441 
3442 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3443 			host->ops->get_max_timeout_count(host) : 1 << 27;
3444 		mmc->max_busy_timeout /= host->timeout_clk;
3445 	}
3446 
3447 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3448 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3449 
3450 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3451 		host->flags |= SDHCI_AUTO_CMD12;
3452 
	/* Auto-CMD23 only works in ADMA or PIO mode. */
3454 	if ((host->version >= SDHCI_SPEC_300) &&
3455 	    ((host->flags & SDHCI_USE_ADMA) ||
3456 	     !(host->flags & SDHCI_USE_SDMA)) &&
3457 	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3458 		host->flags |= SDHCI_AUTO_CMD23;
3459 		DBG("Auto-CMD23 available\n");
3460 	} else {
3461 		DBG("Auto-CMD23 unavailable\n");
3462 	}
3463 
3464 	/*
3465 	 * A controller may support 8-bit width, but the board itself
3466 	 * might not have the pins brought out.  Boards that support
3467 	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3468 	 * their platform code before calling sdhci_add_host(), and we
3469 	 * won't assume 8-bit width for hosts without that CAP.
3470 	 */
3471 	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3472 		mmc->caps |= MMC_CAP_4_BIT_DATA;
3473 
3474 	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3475 		mmc->caps &= ~MMC_CAP_CMD23;
3476 
3477 	if (host->caps & SDHCI_CAN_DO_HISPD)
3478 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3479 
3480 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3481 	    mmc_card_is_removable(mmc) &&
3482 	    mmc_gpio_get_cd(host->mmc) < 0)
3483 		mmc->caps |= MMC_CAP_NEEDS_POLL;
3484 
	/* If there is a vqmmc regulator and it has no 1.8V signalling, then there's no UHS */
3486 	if (!IS_ERR(mmc->supply.vqmmc)) {
3487 		ret = regulator_enable(mmc->supply.vqmmc);
3488 		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3489 						    1950000))
3490 			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3491 					 SDHCI_SUPPORT_SDR50 |
3492 					 SDHCI_SUPPORT_DDR50);
3493 		if (ret) {
3494 			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3495 				mmc_hostname(mmc), ret);
3496 			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3497 		}
3498 	}
3499 
3500 	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3501 		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3502 				 SDHCI_SUPPORT_DDR50);
3503 	}
3504 
3505 	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3506 	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3507 			   SDHCI_SUPPORT_DDR50))
3508 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3509 
	/* SDR104 support also implies SDR50 support */
3511 	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3512 		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/*
		 * SD3.0: SDR104 is supported, so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
3516 		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3517 			mmc->caps2 |= MMC_CAP2_HS200;
3518 	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3519 		mmc->caps |= MMC_CAP_UHS_SDR50;
3520 	}
3521 
3522 	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3523 	    (host->caps1 & SDHCI_SUPPORT_HS400))
3524 		mmc->caps2 |= MMC_CAP2_HS400;
3525 
3526 	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3527 	    (IS_ERR(mmc->supply.vqmmc) ||
3528 	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3529 					     1300000)))
3530 		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3531 
3532 	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3533 	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3534 		mmc->caps |= MMC_CAP_UHS_DDR50;
3535 
3536 	/* Does the host need tuning for SDR50? */
3537 	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3538 		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3539 
3540 	/* Driver Type(s) (A, C, D) supported by the host */
3541 	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3542 		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3543 	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3544 		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3545 	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3546 		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3547 
3548 	/* Initial value for re-tuning timer count */
3549 	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3550 			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3551 
3552 	/*
	 * If the Re-tuning Timer is not disabled, the actual re-tuning
	 * period is 2 ^ (n - 1) seconds, where n is the register value.
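	 * For example, a register value of 4 gives an 8-second period.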
3555 	 */
3556 	if (host->tuning_count)
3557 		host->tuning_count = 1 << (host->tuning_count - 1);
3558 
3559 	/* Re-tuning mode supported by the Host Controller */
3560 	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3561 			     SDHCI_RETUNING_MODE_SHIFT;
3562 
3563 	ocr_avail = 0;
3564 
3565 	/*
3566 	 * According to SD Host Controller spec v3.00, if the Host System
3567 	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3568 	 * the value is meaningful only if Voltage Support in the Capabilities
3569 	 * register is set. The actual current value is 4 times the register
3570 	 * value.
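	 *
	 * For example, a register value of 200 corresponds to 800 mA.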
3571 	 */
3572 	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3573 	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3574 		int curr = regulator_get_current_limit(mmc->supply.vmmc);
3575 		if (curr > 0) {
3576 
3577 			/* convert to SDHCI_MAX_CURRENT format */
3578 			curr = curr/1000;  /* convert to mA */
3579 			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3580 
3581 			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3582 			max_current_caps =
3583 				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3584 				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3585 				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
3586 		}
3587 	}
3588 
3589 	if (host->caps & SDHCI_CAN_VDD_330) {
3590 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3591 
3592 		mmc->max_current_330 = ((max_current_caps &
3593 				   SDHCI_MAX_CURRENT_330_MASK) >>
3594 				   SDHCI_MAX_CURRENT_330_SHIFT) *
3595 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3596 	}
3597 	if (host->caps & SDHCI_CAN_VDD_300) {
3598 		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3599 
3600 		mmc->max_current_300 = ((max_current_caps &
3601 				   SDHCI_MAX_CURRENT_300_MASK) >>
3602 				   SDHCI_MAX_CURRENT_300_SHIFT) *
3603 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3604 	}
3605 	if (host->caps & SDHCI_CAN_VDD_180) {
3606 		ocr_avail |= MMC_VDD_165_195;
3607 
3608 		mmc->max_current_180 = ((max_current_caps &
3609 				   SDHCI_MAX_CURRENT_180_MASK) >>
3610 				   SDHCI_MAX_CURRENT_180_SHIFT) *
3611 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3612 	}
3613 
3614 	/* If OCR set by host, use it instead. */
3615 	if (host->ocr_mask)
3616 		ocr_avail = host->ocr_mask;
3617 
3618 	/* If OCR set by external regulators, give it highest prio. */
3619 	if (mmc->ocr_avail)
3620 		ocr_avail = mmc->ocr_avail;
3621 
3622 	mmc->ocr_avail = ocr_avail;
3623 	mmc->ocr_avail_sdio = ocr_avail;
3624 	if (host->ocr_avail_sdio)
3625 		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3626 	mmc->ocr_avail_sd = ocr_avail;
3627 	if (host->ocr_avail_sd)
3628 		mmc->ocr_avail_sd &= host->ocr_avail_sd;
3629 	else /* normal SD controllers don't support 1.8V */
3630 		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3631 	mmc->ocr_avail_mmc = ocr_avail;
3632 	if (host->ocr_avail_mmc)
3633 		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3634 
3635 	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
3637 		       mmc_hostname(mmc));
3638 		ret = -ENODEV;
3639 		goto unreg;
3640 	}
3641 
3642 	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3643 			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3644 			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3645 	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3646 		host->flags |= SDHCI_SIGNALING_180;
3647 
3648 	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3649 		host->flags |= SDHCI_SIGNALING_120;
3650 
3651 	spin_lock_init(&host->lock);
3652 
3653 	/*
	 * Maximum number of segments. Depends on whether the hardware
	 * can do scatter/gather. PIO has no hardware limit, so it also
	 * uses SDHCI_MAX_SEGS.
3656 	 */
3657 	if (host->flags & SDHCI_USE_ADMA)
3658 		mmc->max_segs = SDHCI_MAX_SEGS;
3659 	else if (host->flags & SDHCI_USE_SDMA)
3660 		mmc->max_segs = 1;
3661 	else /* PIO */
3662 		mmc->max_segs = SDHCI_MAX_SEGS;
3663 
	/*
	 * Maximum request size in bytes, limited by the SDMA boundary
	 * size (512 KiB). Some tuning modes impose a 4 MiB limit, but
	 * the SDMA limit is lower anyway.
	 */
3669 	mmc->max_req_size = 524288;
3670 
3671 	/*
	 * Maximum segment size. Without hardware scatter/gather, a single
	 * segment may span the whole request; with ADMA, each descriptor
	 * entry is limited to 64 KiB.
3675 	 */
3676 	if (host->flags & SDHCI_USE_ADMA) {
3677 		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3678 			mmc->max_seg_size = 65535;
3679 		else
3680 			mmc->max_seg_size = 65536;
3681 	} else {
3682 		mmc->max_seg_size = mmc->max_req_size;
3683 	}
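
	/*
	 * Background on the 65535 cap (an assumption, not stated in this
	 * file): an ADMA2 descriptor encodes a 64 KiB transfer with a
	 * length field of zero, which hosts flagged with
	 * SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC cannot process, so the
	 * segment size stops one byte short of 64 KiB.
	 */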
3684 
3685 	/*
3686 	 * Maximum block size. This varies from controller to controller and
3687 	 * is specified in the capabilities register.
3688 	 */
3689 	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3690 		mmc->max_blk_size = 2;
3691 	} else {
3692 		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3693 				SDHCI_MAX_BLOCK_SHIFT;
3694 		if (mmc->max_blk_size >= 3) {
3695 			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3696 				mmc_hostname(mmc));
3697 			mmc->max_blk_size = 0;
3698 		}
3699 	}
3700 
3701 	mmc->max_blk_size = 512 << mmc->max_blk_size;
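
	/*
	 * Worked example: a capabilities field of 2 gives 512 << 2 = 2048
	 * bytes, while the invalid-value fallback of 0 above gives the
	 * conventional 512 << 0 = 512 bytes.
	 */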
3702 
	/* Maximum block count. */
3706 	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3707 
3708 	return 0;
3709 
3710 unreg:
3711 	if (!IS_ERR(mmc->supply.vqmmc))
3712 		regulator_disable(mmc->supply.vqmmc);
3713 undma:
3714 	if (host->align_buffer)
3715 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3716 				  host->adma_table_sz, host->align_buffer,
3717 				  host->align_addr);
3718 	host->adma_table = NULL;
3719 	host->align_buffer = NULL;
3720 
3721 	return ret;
3722 }
3723 EXPORT_SYMBOL_GPL(sdhci_setup_host);
3724 
3725 void sdhci_cleanup_host(struct sdhci_host *host)
3726 {
3727 	struct mmc_host *mmc = host->mmc;
3728 
3729 	if (!IS_ERR(mmc->supply.vqmmc))
3730 		regulator_disable(mmc->supply.vqmmc);
3731 
3732 	if (host->align_buffer)
3733 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3734 				  host->adma_table_sz, host->align_buffer,
3735 				  host->align_addr);
3736 	host->adma_table = NULL;
3737 	host->align_buffer = NULL;
3738 }
3739 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
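
/*
 * A minimal sketch (hypothetical glue driver, illustrative only) of the
 * split flow that sdhci_setup_host(), __sdhci_add_host() and
 * sdhci_cleanup_host() make possible: capabilities can be adjusted between
 * setup and registration.
 *
 *	static int foo_sdhci_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = ...;	(allocated by the glue layer)
 *		int ret;
 *
 *		ret = sdhci_setup_host(host);
 *		if (ret)
 *			return ret;
 *
 *		(adjust host->mmc->caps, clocks, etc. here)
 *
 *		ret = __sdhci_add_host(host);
 *		if (ret)
 *			sdhci_cleanup_host(host);
 *		return ret;
 *	}
 */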
3740 
3741 int __sdhci_add_host(struct sdhci_host *host)
3742 {
3743 	struct mmc_host *mmc = host->mmc;
3744 	int ret;
3745 
3746 	/*
	 * Init the finish tasklet.
3748 	 */
3749 	tasklet_init(&host->finish_tasklet,
3750 		sdhci_tasklet_finish, (unsigned long)host);
3751 
3752 	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3753 	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
3754 		    (unsigned long)host);
3755 
3756 	init_waitqueue_head(&host->buf_ready_int);
3757 
3758 	sdhci_init(host, 0);
3759 
3760 	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3761 				   IRQF_SHARED,	mmc_hostname(mmc), host);
3762 	if (ret) {
3763 		pr_err("%s: Failed to request IRQ %d: %d\n",
3764 		       mmc_hostname(mmc), host->irq, ret);
3765 		goto untasklet;
3766 	}
3767 
3768 	ret = sdhci_led_register(host);
3769 	if (ret) {
3770 		pr_err("%s: Failed to register LED device: %d\n",
3771 		       mmc_hostname(mmc), ret);
3772 		goto unirq;
3773 	}
3774 
3775 	mmiowb();
3776 
3777 	ret = mmc_add_host(mmc);
3778 	if (ret)
3779 		goto unled;
3780 
3781 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3782 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3783 		(host->flags & SDHCI_USE_ADMA) ?
3784 		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3785 		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
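
	/*
	 * The message above looks like this in a boot log (illustrative
	 * values): "mmc0: SDHCI controller on ffe0f000.sdhci
	 * [ffe0f000.sdhci] using ADMA".
	 */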
3786 
3787 	sdhci_enable_card_detection(host);
3788 
3789 	return 0;
3790 
3791 unled:
3792 	sdhci_led_unregister(host);
3793 unirq:
3794 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3795 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3796 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3797 	free_irq(host->irq, host);
3798 untasklet:
3799 	tasklet_kill(&host->finish_tasklet);
3800 
3801 	return ret;
3802 }
3803 EXPORT_SYMBOL_GPL(__sdhci_add_host);
3804 
3805 int sdhci_add_host(struct sdhci_host *host)
3806 {
3807 	int ret;
3808 
3809 	ret = sdhci_setup_host(host);
3810 	if (ret)
3811 		return ret;
3812 
3813 	ret = __sdhci_add_host(host);
3814 	if (ret)
3815 		goto cleanup;
3816 
3817 	return 0;
3818 
3819 cleanup:
3820 	sdhci_cleanup_host(host);
3821 
3822 	return ret;
3823 }
3824 EXPORT_SYMBOL_GPL(sdhci_add_host);
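
/*
 * sdhci_add_host() is the common single-call registration path; glue
 * drivers that must tweak capabilities in between instead use the
 * sdhci_setup_host()/__sdhci_add_host() pair sketched earlier, calling
 * sdhci_cleanup_host() themselves on failure.
 */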
3825 
3826 void sdhci_remove_host(struct sdhci_host *host, int dead)
3827 {
3828 	struct mmc_host *mmc = host->mmc;
3829 	unsigned long flags;
3830 
3831 	if (dead) {
3832 		spin_lock_irqsave(&host->lock, flags);
3833 
3834 		host->flags |= SDHCI_DEVICE_DEAD;
3835 
3836 		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
3839 			sdhci_error_out_mrqs(host, -ENOMEDIUM);
3840 		}
3841 
3842 		spin_unlock_irqrestore(&host->lock, flags);
3843 	}
3844 
3845 	sdhci_disable_card_detection(host);
3846 
3847 	mmc_remove_host(mmc);
3848 
3849 	sdhci_led_unregister(host);
3850 
3851 	if (!dead)
3852 		sdhci_do_reset(host, SDHCI_RESET_ALL);
3853 
3854 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3855 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3856 	free_irq(host->irq, host);
3857 
3858 	del_timer_sync(&host->timer);
3859 	del_timer_sync(&host->data_timer);
3860 
3861 	tasklet_kill(&host->finish_tasklet);
3862 
3863 	if (!IS_ERR(mmc->supply.vqmmc))
3864 		regulator_disable(mmc->supply.vqmmc);
3865 
3866 	if (host->align_buffer)
3867 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3868 				  host->adma_table_sz, host->align_buffer,
3869 				  host->align_addr);
3870 
3871 	host->adma_table = NULL;
3872 	host->align_buffer = NULL;
3873 }
EXPORT_SYMBOL_GPL(sdhci_remove_host);
3876 
3877 void sdhci_free_host(struct sdhci_host *host)
3878 {
3879 	mmc_free_host(host->mmc);
3880 }
EXPORT_SYMBOL_GPL(sdhci_free_host);
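
/*
 * Teardown sketch (hypothetical glue driver, illustrative only): the "dead"
 * argument tells sdhci_remove_host() whether the controller can still be
 * accessed; a common test is whether register reads return all ones.
 *
 *	static int foo_sdhci_remove(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = platform_get_drvdata(pdev);
 *		int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) ==
 *			    0xffffffff);
 *
 *		sdhci_remove_host(host, dead);
 *		sdhci_free_host(host);
 *		return 0;
 *	}
 */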
3883 
3884 /*****************************************************************************\
3885  *                                                                           *
3886  * Driver init/exit                                                          *
3887  *                                                                           *
3888 \*****************************************************************************/
3889 
3890 static int __init sdhci_drv_init(void)
3891 {
3892 	pr_info(DRIVER_NAME
3893 		": Secure Digital Host Controller Interface driver\n");
3894 	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3895 
3896 	return 0;
3897 }
3898 
3899 static void __exit sdhci_drv_exit(void)
3900 {
3901 }
3902 
3903 module_init(sdhci_drv_init);
3904 module_exit(sdhci_drv_exit);
3905 
3906 module_param(debug_quirks, uint, 0444);
3907 module_param(debug_quirks2, uint, 0444);
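
/*
 * Example (illustrative value): booting with sdhci.debug_quirks=0x1 on the
 * kernel command line, or loading with "modprobe sdhci debug_quirks=0x1",
 * forces the given SDHCI_QUIRK_* bitmask, as the parameter descriptions
 * below note. The 0444 permissions make the values visible, but not
 * writable, via sysfs at runtime.
 */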
3908 
3909 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3910 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3911 MODULE_LICENSE("GPL");
3912 
3913 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3914 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
3915