/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

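/*
 * A command occupies the data lines either because it carries a data
 * transfer or because it uses busy signalling on DAT0 (MMC_RSP_BUSY).
 */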
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/*
		 * Resetting the controller clears many registers, among
		 * them the preset value enable.
		 */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

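/*
 * PIO reads drain the controller's 32-bit Buffer Data Port register.
 * Each readl() yields four bytes ("chunk" counts how many of them are
 * still unconsumed in "scratch"), which are stored to the scatterlist
 * one byte at a time so that arbitrary block sizes and SG boundaries
 * are handled correctly.
 */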
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

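/*
 * Map the request's scatterlist for DMA.  data->host_cookie records
 * whether the buffers were already mapped by the ->pre_req() hook
 * (COOKIE_PRE_MAPPED), in which case the earlier dma_map_sg() result
 * is reused instead of mapping a second time.
 */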
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      mmc_get_dma_dir(data));

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

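/*
 * ADMA2 descriptors start with a 16-bit attribute/command word and a
 * 16-bit length, followed by a 32-bit (or, for the 64-bit layout, a
 * 64-bit) buffer address.  ADMA2_TRAN_VALID marks a normal transfer
 * descriptor; ADMA2_END terminates the descriptor table.
 */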
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

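/*
 * Undo the work of sdhci_adma_table_pre(): for reads, the bytes that
 * were transferred via the bounce (align) buffer have to be copied
 * back into the unaligned scatterlist entries.
 */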
static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

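/*
 * The returned count selects a data timeout of 2^(13 + count) timeout
 * clock cycles.  host->timeout_clk is in kHz, so the initial
 * current_timeout of (1 << 13) * 1000 / timeout_clk is that minimum
 * timeout expressed in microseconds.  For example, with a 48 MHz
 * timeout clock (timeout_clk == 48000) the base timeout is ~170 us,
 * and each increment of count doubles it.
 */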
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

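/*
 * Program the Transfer Mode register: block-count enable, multi-block,
 * read direction, DMA enable, and the Auto-CMD12/Auto-CMD23 bits that
 * let the controller issue the stop or SET_BLOCK_COUNT command itself.
 */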
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

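/*
 * Two timers are kept so that a command sent while a data transfer is
 * in progress (cap_cmd_during_tfr) can presumably be timed
 * independently of the command that owns the data lines.
 */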
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

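/*
 * Read back the response.  For 136-bit (R2) responses the controller
 * strips the CRC, so the RESPONSE registers hold bits 127:8 of the
 * response; each 32-bit word is shifted left by 8 and topped up with
 * the adjacent byte of the next register to reconstruct resp[0..3].
 */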
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |=
						sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

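/*
 * SDHCI 3.00 preset value registers: one read-only register per bus
 * speed mode, each encoding a suggested SDCLK divider, clock generator
 * select and driver strength for that mode.
 */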
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

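/*
 * Compute the Clock Control register value for the requested SDCLK
 * frequency.  For v3.00+ the 10-bit divider N gives base_clk / (2 * N)
 * in divided clock mode (N == 0 means base_clk), and its upper two
 * bits live in a separate field (SDHCI_DIV_HI_MASK).  Programmable
 * clock mode instead uses clk_mul: SDCLK = (base_clk * clk_mul) / (N + 1).
 */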
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

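/*
 * Drive the Power Control register directly (no external regulator).
 * Note that vdd is a bit number within the OCR mask, hence the
 * switch on (1 << vdd) below.
 */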
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS ||
	     ios->timing == MMC_TIMING_MMC_HS400 ||
	     ios->timing == MMC_TIMING_MMC_HS200 ||
	     ios->timing == MMC_TIMING_MMC_DDR52 ||
	     ios->timing == MMC_TIMING_UHS_SDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR104 ||
	     ios->timing == MMC_TIMING_UHS_DDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR25)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot GPIO detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

1857 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1858 				      struct mmc_ios *ios)
1859 {
1860 	struct sdhci_host *host = mmc_priv(mmc);
1861 	u16 ctrl;
1862 	int ret;
1863 
1864 	/*
1865 	 * Signal Voltage Switching is only applicable for Host Controllers
1866 	 * v3.00 and above.
1867 	 */
1868 	if (host->version < SDHCI_SPEC_300)
1869 		return 0;
1870 
1871 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1872 
1873 	switch (ios->signal_voltage) {
1874 	case MMC_SIGNAL_VOLTAGE_330:
1875 		if (!(host->flags & SDHCI_SIGNALING_330))
1876 			return -EINVAL;
1877 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1878 		ctrl &= ~SDHCI_CTRL_VDD_180;
1879 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1880 
1881 		if (!IS_ERR(mmc->supply.vqmmc)) {
1882 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1883 			if (ret) {
1884 				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1885 					mmc_hostname(mmc));
1886 				return -EIO;
1887 			}
1888 		}
1889 		/* Wait for 5ms */
1890 		usleep_range(5000, 5500);
1891 
1892 		/* 3.3V regulator output should be stable within 5 ms */
1893 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1894 		if (!(ctrl & SDHCI_CTRL_VDD_180))
1895 			return 0;
1896 
1897 		pr_warn("%s: 3.3V regulator output did not become stable\n",
1898 			mmc_hostname(mmc));
1899 
1900 		return -EAGAIN;
1901 	case MMC_SIGNAL_VOLTAGE_180:
1902 		if (!(host->flags & SDHCI_SIGNALING_180))
1903 			return -EINVAL;
1904 		if (!IS_ERR(mmc->supply.vqmmc)) {
1905 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1906 			if (ret) {
1907 				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1908 					mmc_hostname(mmc));
1909 				return -EIO;
1910 			}
1911 		}
1912 
1913 		/*
1914 		 * Enable 1.8V Signal Enable in the Host Control2
1915 		 * register
1916 		 */
1917 		ctrl |= SDHCI_CTRL_VDD_180;
1918 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1919 
1920 		/* Some controllers need to do more when switching */
1921 		if (host->ops->voltage_switch)
1922 			host->ops->voltage_switch(host);
1923 
1924 		/* 1.8V regulator output should be stable within 5 ms */
1925 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1926 		if (ctrl & SDHCI_CTRL_VDD_180)
1927 			return 0;
1928 
1929 		pr_warn("%s: 1.8V regulator output did not become stable\n",
1930 			mmc_hostname(mmc));
1931 
1932 		return -EAGAIN;
1933 	case MMC_SIGNAL_VOLTAGE_120:
1934 		if (!(host->flags & SDHCI_SIGNALING_120))
1935 			return -EINVAL;
1936 		if (!IS_ERR(mmc->supply.vqmmc)) {
1937 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1938 			if (ret) {
1939 				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1940 					mmc_hostname(mmc));
1941 				return -EIO;
1942 			}
1943 		}
1944 		return 0;
1945 	default:
1946 		/* No signal voltage switch required */
1947 		return 0;
1948 	}
1949 }
1950 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
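
/*
 * Sketch of a platform hook (assumed names): controllers that need extra
 * work when the signalling level changes implement ->voltage_switch,
 * which is called above right after 1.8V Signal Enable has been set.
 *
 *	static void my_voltage_switch(struct sdhci_host *host)
 *	{
 *		... vendor-specific pad/IO-domain reconfiguration ...
 *	}
 *
 *	static const struct sdhci_ops my_ops = {
 *		.voltage_switch = my_voltage_switch,
 *	};
 */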
1951 
1952 static int sdhci_card_busy(struct mmc_host *mmc)
1953 {
1954 	struct sdhci_host *host = mmc_priv(mmc);
1955 	u32 present_state;
1956 
1957 	/* Check whether DAT[0] is 0 */
1958 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1959 
1960 	return !(present_state & SDHCI_DATA_0_LVL_MASK);
1961 }
1962 
1963 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1964 {
1965 	struct sdhci_host *host = mmc_priv(mmc);
1966 	unsigned long flags;
1967 
1968 	spin_lock_irqsave(&host->lock, flags);
1969 	host->flags |= SDHCI_HS400_TUNING;
1970 	spin_unlock_irqrestore(&host->lock, flags);
1971 
1972 	return 0;
1973 }
1974 
1975 static void sdhci_start_tuning(struct sdhci_host *host)
1976 {
1977 	u16 ctrl;
1978 
1979 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1980 	ctrl |= SDHCI_CTRL_EXEC_TUNING;
1981 	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
1982 		ctrl |= SDHCI_CTRL_TUNED_CLK;
1983 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1984 
1985 	/*
1986 	 * As per the Host Controller spec v3.00, tuning command
1987 	 * generates Buffer Read Ready interrupt, so enable that.
1988 	 *
1989 	 * Note: The spec clearly says that when tuning sequence
1990 	 * is being performed, the controller does not generate
1991 	 * interrupts other than Buffer Read Ready interrupt. But
1992 	 * to make sure we don't hit a controller bug, we _only_
1993 	 * enable Buffer Read Ready interrupt here.
1994 	 */
1995 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
1996 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
1997 }
1998 
1999 static void sdhci_end_tuning(struct sdhci_host *host)
2000 {
2001 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2002 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2003 }
2004 
2005 static void sdhci_reset_tuning(struct sdhci_host *host)
2006 {
2007 	u16 ctrl;
2008 
2009 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2010 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2011 	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2012 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2013 }
2014 
2015 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2016 {
2017 	sdhci_reset_tuning(host);
2018 
2019 	sdhci_do_reset(host, SDHCI_RESET_CMD);
2020 	sdhci_do_reset(host, SDHCI_RESET_DATA);
2021 
2022 	sdhci_end_tuning(host);
2023 
2024 	mmc_abort_tuning(host->mmc, opcode);
2025 }
2026 
2027 /*
2028  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
2029  * SDHCI tuning command does not have a data payload (or rather the hardware
2030  * handles it automatically), so mmc_send_tuning() would return -EIO. Also the
2031  * tuning command's interrupt setup is different from other commands' and there
2032  * is no timeout interrupt, so special handling is needed.
2033  */
2034 static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2035 {
2036 	struct mmc_host *mmc = host->mmc;
2037 	struct mmc_command cmd = {};
2038 	struct mmc_request mrq = {};
2039 	unsigned long flags;
2040 
2041 	spin_lock_irqsave(&host->lock, flags);
2042 
2043 	cmd.opcode = opcode;
2044 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2045 	cmd.mrq = &mrq;
2046 
2047 	mrq.cmd = &cmd;
2048 	/*
2049 	 * In response to CMD19, the card sends a 64-byte tuning block
2050 	 * to the Host Controller, so the block size is set to 64 here.
2051 	 * CMD21 on an 8-bit bus uses a 128-byte tuning block instead.
2052 	 */
2053 	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2054 	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2055 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
2056 	else
2057 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
2058 
2059 	/*
2060 	 * The tuning block is sent by the card to the host controller.
2061 	 * So we set the TRNS_READ bit in the Transfer Mode register.
2062 	 * This also takes care of setting DMA Enable and Multi Block
2063 	 * Select in the same register to 0.
2064 	 */
2065 	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2066 
2067 	sdhci_send_command(host, &cmd);
2068 
2069 	host->cmd = NULL;
2070 
2071 	sdhci_del_timer(host, &mrq);
2072 
2073 	host->tuning_done = 0;
2074 
2075 	mmiowb();
2076 	spin_unlock_irqrestore(&host->lock, flags);
2077 
2078 	/* Wait for Buffer Read Ready interrupt */
2079 	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2080 			   msecs_to_jiffies(50));
2081 
2082 }
2083 
2084 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2085 {
2086 	int i;
2087 
2088 	/*
2089 	 * Issue the tuning command repeatedly until the Execute Tuning bit
2090 	 * is cleared or the number of loops reaches MAX_TUNING_LOOP (40).
2091 	 */
2092 	for (i = 0; i < MAX_TUNING_LOOP; i++) {
2093 		u16 ctrl;
2094 
2095 		sdhci_send_tuning(host, opcode);
2096 
2097 		if (!host->tuning_done) {
2098 			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2099 				mmc_hostname(host->mmc));
2100 			sdhci_abort_tuning(host, opcode);
2101 			return;
2102 		}
2103 
2104 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2105 		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2106 			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2107 				return; /* Success! */
2108 			break;
2109 		}
2110 
2111 		/* Spec does not require a delay between tuning cycles */
2112 		if (host->tuning_delay > 0)
2113 			mdelay(host->tuning_delay);
2114 	}
2115 
2116 	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2117 		mmc_hostname(host->mmc));
2118 	sdhci_reset_tuning(host);
2119 }
2120 
2121 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2122 {
2123 	struct sdhci_host *host = mmc_priv(mmc);
2124 	int err = 0;
2125 	unsigned int tuning_count = 0;
2126 	bool hs400_tuning;
2127 
2128 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2129 
2130 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2131 		tuning_count = host->tuning_count;
2132 
2133 	/*
2134 	 * The Host Controller needs tuning in case of SDR104 and DDR50
2135 	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2136 	 * the Capabilities register.
2137 	 * If the Host Controller supports the HS200 mode then the
2138 	 * tuning function has to be executed.
2139 	 */
2140 	switch (host->timing) {
2141 	/* HS400 tuning is done in HS200 mode */
2142 	case MMC_TIMING_MMC_HS400:
2143 		err = -EINVAL;
2144 		goto out;
2145 
2146 	case MMC_TIMING_MMC_HS200:
2147 		/*
2148 		 * Periodic re-tuning for HS400 is not expected to be needed, so
2149 		 * disable it here.
2150 		 */
2151 		if (hs400_tuning)
2152 			tuning_count = 0;
2153 		break;
2154 
2155 	case MMC_TIMING_UHS_SDR104:
2156 	case MMC_TIMING_UHS_DDR50:
2157 		break;
2158 
2159 	case MMC_TIMING_UHS_SDR50:
2160 		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2161 			break;
2162 		/* FALLTHROUGH */
2163 
2164 	default:
2165 		goto out;
2166 	}
2167 
2168 	if (host->ops->platform_execute_tuning) {
2169 		err = host->ops->platform_execute_tuning(host, opcode);
2170 		goto out;
2171 	}
2172 
2173 	host->mmc->retune_period = tuning_count;
2174 
2175 	if (host->tuning_delay < 0)
2176 		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2177 
2178 	sdhci_start_tuning(host);
2179 
2180 	__sdhci_execute_tuning(host, opcode);
2181 
2182 	sdhci_end_tuning(host);
2183 out:
2184 	host->flags &= ~SDHCI_HS400_TUNING;
2185 
2186 	return err;
2187 }
2188 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
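
/*
 * Illustrative sketch (assumed names): a controller with its own tuning
 * engine bypasses the standard loop by providing
 * ->platform_execute_tuning, which sdhci_execute_tuning() calls instead
 * of sdhci_start_tuning()/__sdhci_execute_tuning().
 *
 *	static int my_execute_tuning(struct sdhci_host *host, u32 opcode)
 *	{
 *		... program the vendor delay line, issue CMD19/CMD21,
 *		    pick the best tap ...
 *		return 0;
 *	}
 *
 *	static const struct sdhci_ops my_ops = {
 *		.platform_execute_tuning = my_execute_tuning,
 *	};
 */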
2189 
2190 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2191 {
2192 	/* Host Controller v3.00 defines preset value registers */
2193 	if (host->version < SDHCI_SPEC_300)
2194 		return;
2195 
2196 	/*
2197 	 * Only change the Preset Value Enable bit when the requested state
2198 	 * differs from the current one; otherwise there is nothing to do.
2199 	 */
2200 	if (host->preset_enabled != enable) {
2201 		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2202 
2203 		if (enable)
2204 			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2205 		else
2206 			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2207 
2208 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2209 
2210 		if (enable)
2211 			host->flags |= SDHCI_PV_ENABLED;
2212 		else
2213 			host->flags &= ~SDHCI_PV_ENABLED;
2214 
2215 		host->preset_enabled = enable;
2216 	}
2217 }
2218 
2219 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2220 				int err)
2221 {
2222 	struct sdhci_host *host = mmc_priv(mmc);
2223 	struct mmc_data *data = mrq->data;
2224 
2225 	if (data->host_cookie != COOKIE_UNMAPPED)
2226 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2227 			     mmc_get_dma_dir(data));
2228 
2229 	data->host_cookie = COOKIE_UNMAPPED;
2230 }
2231 
2232 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2233 {
2234 	struct sdhci_host *host = mmc_priv(mmc);
2235 
2236 	mrq->data->host_cookie = COOKIE_UNMAPPED;
2237 
2238 	if (host->flags & SDHCI_REQ_USE_DMA)
2239 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2240 }
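
/*
 * The pre_req/post_req pair lets the MMC core map the next request's
 * scatterlist while the current transfer is still in flight.  The
 * data->host_cookie field tracks who mapped the buffers: COOKIE_UNMAPPED
 * (nothing mapped), COOKIE_PRE_MAPPED (mapped in sdhci_pre_req()) or
 * COOKIE_MAPPED (mapped in the request path); sdhci_post_req() unmaps
 * whatever is still mapped and resets the cookie.
 */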
2241 
2242 static inline bool sdhci_has_requests(struct sdhci_host *host)
2243 {
2244 	return host->cmd || host->data_cmd;
2245 }
2246 
2247 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2248 {
2249 	if (host->data_cmd) {
2250 		host->data_cmd->error = err;
2251 		sdhci_finish_mrq(host, host->data_cmd->mrq);
2252 	}
2253 
2254 	if (host->cmd) {
2255 		host->cmd->error = err;
2256 		sdhci_finish_mrq(host, host->cmd->mrq);
2257 	}
2258 }
2259 
2260 static void sdhci_card_event(struct mmc_host *mmc)
2261 {
2262 	struct sdhci_host *host = mmc_priv(mmc);
2263 	unsigned long flags;
2264 	int present;
2265 
2266 	/* First check if client has provided their own card event */
2267 	if (host->ops->card_event)
2268 		host->ops->card_event(host);
2269 
2270 	present = mmc->ops->get_cd(mmc);
2271 
2272 	spin_lock_irqsave(&host->lock, flags);
2273 
2274 	/* Check sdhci_has_requests() first in case we are runtime suspended */
2275 	if (sdhci_has_requests(host) && !present) {
2276 		pr_err("%s: Card removed during transfer!\n",
2277 			mmc_hostname(host->mmc));
2278 		pr_err("%s: Resetting controller.\n",
2279 			mmc_hostname(host->mmc));
2280 
2281 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2282 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2283 
2284 		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2285 	}
2286 
2287 	spin_unlock_irqrestore(&host->lock, flags);
2288 }
2289 
2290 static const struct mmc_host_ops sdhci_ops = {
2291 	.request	= sdhci_request,
2292 	.post_req	= sdhci_post_req,
2293 	.pre_req	= sdhci_pre_req,
2294 	.set_ios	= sdhci_set_ios,
2295 	.get_cd		= sdhci_get_cd,
2296 	.get_ro		= sdhci_get_ro,
2297 	.hw_reset	= sdhci_hw_reset,
2298 	.enable_sdio_irq = sdhci_enable_sdio_irq,
2299 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2300 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2301 	.execute_tuning			= sdhci_execute_tuning,
2302 	.card_event			= sdhci_card_event,
2303 	.card_busy	= sdhci_card_busy,
2304 };
2305 
2306 /*****************************************************************************\
2307  *                                                                           *
2308  * Tasklets                                                                  *
2309  *                                                                           *
2310 \*****************************************************************************/
2311 
2312 static bool sdhci_request_done(struct sdhci_host *host)
2313 {
2314 	unsigned long flags;
2315 	struct mmc_request *mrq;
2316 	int i;
2317 
2318 	spin_lock_irqsave(&host->lock, flags);
2319 
2320 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2321 		mrq = host->mrqs_done[i];
2322 		if (mrq)
2323 			break;
2324 	}
2325 
2326 	if (!mrq) {
2327 		spin_unlock_irqrestore(&host->lock, flags);
2328 		return true;
2329 	}
2330 
2331 	sdhci_del_timer(host, mrq);
2332 
2333 	/*
2334 	 * Always unmap the data buffers if they were mapped by
2335 	 * sdhci_prepare_data() whenever we finish with a request.
2336 	 * This avoids leaking DMA mappings on error.
2337 	 */
2338 	if (host->flags & SDHCI_REQ_USE_DMA) {
2339 		struct mmc_data *data = mrq->data;
2340 
2341 		if (data && data->host_cookie == COOKIE_MAPPED) {
2342 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2343 				     mmc_get_dma_dir(data));
2344 			data->host_cookie = COOKIE_UNMAPPED;
2345 		}
2346 	}
2347 
2348 	/*
2349 	 * The controller needs a reset of internal state machines
2350 	 * upon error conditions.
2351 	 */
2352 	if (sdhci_needs_reset(host, mrq)) {
2353 		/*
2354 		 * Do not finish until command and data lines are available for
2355 		 * reset. Note there can only be one other mrq, so it cannot
2356 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2357 		 * would both be null.
2358 		 */
2359 		if (host->cmd || host->data_cmd) {
2360 			spin_unlock_irqrestore(&host->lock, flags);
2361 			return true;
2362 		}
2363 
2364 		/* Some controllers need this kick or reset won't work here */
2365 		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2366 			/* This is to force an update */
2367 			host->ops->set_clock(host, host->clock);
2368 
2369 		/* The spec says we should do both at the same time, but
2370 		 * Ricoh controllers do not like that. */
2371 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2372 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2373 
2374 		host->pending_reset = false;
2375 	}
2376 
2377 	if (!sdhci_has_requests(host))
2378 		sdhci_led_deactivate(host);
2379 
2380 	host->mrqs_done[i] = NULL;
2381 
2382 	mmiowb();
2383 	spin_unlock_irqrestore(&host->lock, flags);
2384 
2385 	mmc_request_done(host->mmc, mrq);
2386 
2387 	return false;
2388 }
2389 
2390 static void sdhci_tasklet_finish(unsigned long param)
2391 {
2392 	struct sdhci_host *host = (struct sdhci_host *)param;
2393 
2394 	while (!sdhci_request_done(host))
2395 		;
2396 }
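
/*
 * sdhci_request_done() returns false after completing one entry from
 * mrqs_done[] and true when nothing is left or completion must be
 * deferred until a pending reset can be done, so the loop above drains
 * every finished request before the tasklet exits.
 */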
2397 
2398 static void sdhci_timeout_timer(unsigned long data)
2399 {
2400 	struct sdhci_host *host;
2401 	unsigned long flags;
2402 
2403 	host = (struct sdhci_host *)data;
2404 
2405 	spin_lock_irqsave(&host->lock, flags);
2406 
2407 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2408 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2409 		       mmc_hostname(host->mmc));
2410 		sdhci_dumpregs(host);
2411 
2412 		host->cmd->error = -ETIMEDOUT;
2413 		sdhci_finish_mrq(host, host->cmd->mrq);
2414 	}
2415 
2416 	mmiowb();
2417 	spin_unlock_irqrestore(&host->lock, flags);
2418 }
2419 
2420 static void sdhci_timeout_data_timer(unsigned long data)
2421 {
2422 	struct sdhci_host *host;
2423 	unsigned long flags;
2424 
2425 	host = (struct sdhci_host *)data;
2426 
2427 	spin_lock_irqsave(&host->lock, flags);
2428 
2429 	if (host->data || host->data_cmd ||
2430 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2431 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2432 		       mmc_hostname(host->mmc));
2433 		sdhci_dumpregs(host);
2434 
2435 		if (host->data) {
2436 			host->data->error = -ETIMEDOUT;
2437 			sdhci_finish_data(host);
2438 		} else if (host->data_cmd) {
2439 			host->data_cmd->error = -ETIMEDOUT;
2440 			sdhci_finish_mrq(host, host->data_cmd->mrq);
2441 		} else {
2442 			host->cmd->error = -ETIMEDOUT;
2443 			sdhci_finish_mrq(host, host->cmd->mrq);
2444 		}
2445 	}
2446 
2447 	mmiowb();
2448 	spin_unlock_irqrestore(&host->lock, flags);
2449 }
2450 
2451 /*****************************************************************************\
2452  *                                                                           *
2453  * Interrupt handling                                                        *
2454  *                                                                           *
2455 \*****************************************************************************/
2456 
2457 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2458 {
2459 	if (!host->cmd) {
2460 		/*
2461 		 * SDHCI recovers from errors by resetting the cmd and data
2462 		 * circuits.  Until that is done, there very well might be more
2463 		 * interrupts, so ignore them in that case.
2464 		 */
2465 		if (host->pending_reset)
2466 			return;
2467 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2468 		       mmc_hostname(host->mmc), (unsigned)intmask);
2469 		sdhci_dumpregs(host);
2470 		return;
2471 	}
2472 
2473 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2474 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2475 		if (intmask & SDHCI_INT_TIMEOUT)
2476 			host->cmd->error = -ETIMEDOUT;
2477 		else
2478 			host->cmd->error = -EILSEQ;
2479 
2480 		/*
2481 		 * If this command initiates a data phase and a response
2482 		 * CRC error is signalled, the card can start transferring
2483 		 * data - the card may have received the command without
2484 		 * error.  We must not terminate the mmc_request early.
2485 		 *
2486 		 * If the card did not receive the command or returned an
2487 		 * error which prevented it sending data, the data phase
2488 		 * will time out.
2489 		 */
2490 		if (host->cmd->data &&
2491 		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2492 		     SDHCI_INT_CRC) {
2493 			host->cmd = NULL;
2494 			return;
2495 		}
2496 
2497 		sdhci_finish_mrq(host, host->cmd->mrq);
2498 		return;
2499 	}
2500 
2501 	if (intmask & SDHCI_INT_RESPONSE)
2502 		sdhci_finish_command(host);
2503 }
2504 
2505 #ifdef CONFIG_MMC_DEBUG
2506 static void sdhci_adma_show_error(struct sdhci_host *host)
2507 {
2508 	void *desc = host->adma_table;
2509 
2510 	sdhci_dumpregs(host);
2511 
2512 	while (true) {
2513 		struct sdhci_adma2_64_desc *dma_desc = desc;
2514 
2515 		if (host->flags & SDHCI_USE_64_BIT_DMA)
2516 			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2517 			    desc, le32_to_cpu(dma_desc->addr_hi),
2518 			    le32_to_cpu(dma_desc->addr_lo),
2519 			    le16_to_cpu(dma_desc->len),
2520 			    le16_to_cpu(dma_desc->cmd));
2521 		else
2522 			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2523 			    desc, le32_to_cpu(dma_desc->addr_lo),
2524 			    le16_to_cpu(dma_desc->len),
2525 			    le16_to_cpu(dma_desc->cmd));
2526 
2527 		desc += host->desc_sz;
2528 
2529 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2530 			break;
2531 	}
2532 }
2533 #else
2534 static void sdhci_adma_show_error(struct sdhci_host *host) { }
2535 #endif
2536 
2537 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2538 {
2539 	u32 command;
2540 
2541 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2542 	if (intmask & SDHCI_INT_DATA_AVAIL) {
2543 		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2544 		if (command == MMC_SEND_TUNING_BLOCK ||
2545 		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2546 			host->tuning_done = 1;
2547 			wake_up(&host->buf_ready_int);
2548 			return;
2549 		}
2550 	}
2551 
2552 	if (!host->data) {
2553 		struct mmc_command *data_cmd = host->data_cmd;
2554 
2555 		/*
2556 		 * The "data complete" interrupt is also used to
2557 		 * indicate that a busy state has ended. See comment
2558 		 * above in sdhci_cmd_irq().
2559 		 */
2560 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2561 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2562 				host->data_cmd = NULL;
2563 				data_cmd->error = -ETIMEDOUT;
2564 				sdhci_finish_mrq(host, data_cmd->mrq);
2565 				return;
2566 			}
2567 			if (intmask & SDHCI_INT_DATA_END) {
2568 				host->data_cmd = NULL;
2569 				/*
2570 				 * Some cards handle busy-end interrupt
2571 				 * before the command completed, so make
2572 				 * sure we do things in the proper order.
2573 				 */
2574 				if (host->cmd == data_cmd)
2575 					return;
2576 
2577 				sdhci_finish_mrq(host, data_cmd->mrq);
2578 				return;
2579 			}
2580 		}
2581 
2582 		/*
2583 		 * SDHCI recovers from errors by resetting the cmd and data
2584 		 * circuits. Until that is done, there very well might be more
2585 		 * interrupts, so ignore them in that case.
2586 		 */
2587 		if (host->pending_reset)
2588 			return;
2589 
2590 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2591 		       mmc_hostname(host->mmc), (unsigned)intmask);
2592 		sdhci_dumpregs(host);
2593 
2594 		return;
2595 	}
2596 
2597 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2598 		host->data->error = -ETIMEDOUT;
2599 	else if (intmask & SDHCI_INT_DATA_END_BIT)
2600 		host->data->error = -EILSEQ;
2601 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2602 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2603 			!= MMC_BUS_TEST_R)
2604 		host->data->error = -EILSEQ;
2605 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2606 		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2607 		sdhci_adma_show_error(host);
2608 		host->data->error = -EIO;
2609 		if (host->ops->adma_workaround)
2610 			host->ops->adma_workaround(host, intmask);
2611 	}
2612 
2613 	if (host->data->error)
2614 		sdhci_finish_data(host);
2615 	else {
2616 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2617 			sdhci_transfer_pio(host);
2618 
2619 		/*
2620 		 * We currently don't do anything fancy with DMA
2621 		 * boundaries, but as we can't disable the feature
2622 		 * we need to at least restart the transfer.
2623 		 *
2624 		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2625 		 * should return a valid address to continue from, but as
2626 		 * some controllers are faulty, don't trust them.
2627 		 */
2628 		if (intmask & SDHCI_INT_DMA_END) {
2629 			u32 dmastart, dmanow;
2630 			dmastart = sg_dma_address(host->data->sg);
2631 			dmanow = dmastart + host->data->bytes_xfered;
2632 			/*
2633 			 * Force update to the next DMA block boundary.
2634 			 */
2635 			dmanow = (dmanow &
2636 				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2637 				SDHCI_DEFAULT_BOUNDARY_SIZE;
2638 			host->data->bytes_xfered = dmanow - dmastart;
2639 			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2640 			    dmastart, host->data->bytes_xfered, dmanow);
2641 			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2642 		}
2643 
2644 		if (intmask & SDHCI_INT_DATA_END) {
2645 			if (host->cmd == host->data_cmd) {
2646 				/*
2647 				 * Data managed to finish before the
2648 				 * command completed. Make sure we do
2649 				 * things in the proper order.
2650 				 */
2651 				host->data_early = 1;
2652 			} else {
2653 				sdhci_finish_data(host);
2654 			}
2655 		}
2656 	}
2657 }
2658 
2659 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2660 {
2661 	irqreturn_t result = IRQ_NONE;
2662 	struct sdhci_host *host = dev_id;
2663 	u32 intmask, mask, unexpected = 0;
2664 	int max_loops = 16;
2665 
2666 	spin_lock(&host->lock);
2667 
2668 	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2669 		spin_unlock(&host->lock);
2670 		return IRQ_NONE;
2671 	}
2672 
2673 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2674 	if (!intmask || intmask == 0xffffffff) {
2675 		result = IRQ_NONE;
2676 		goto out;
2677 	}
2678 
2679 	do {
2680 		DBG("IRQ status 0x%08x\n", intmask);
2681 
2682 		if (host->ops->irq) {
2683 			intmask = host->ops->irq(host, intmask);
2684 			if (!intmask)
2685 				goto cont;
2686 		}
2687 
2688 		/* Clear selected interrupts. */
2689 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2690 				  SDHCI_INT_BUS_POWER);
2691 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2692 
2693 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2694 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2695 				      SDHCI_CARD_PRESENT;
2696 
2697 			/*
2698 			 * On i.MX eSDHC it has been observed that the
2699 			 * INSERT bit is set again immediately after being
2700 			 * cleared while a card is inserted, so the irq is
2701 			 * masked to prevent an interrupt storm that would
2702 			 * freeze the system.  The REMOVE bit behaves the
2703 			 * same way.
2704 			 *
2705 			 * More testing is needed to ensure this works on
2706 			 * other platforms too.
2707 			 */
2708 			host->ier &= ~(SDHCI_INT_CARD_INSERT |
2709 				       SDHCI_INT_CARD_REMOVE);
2710 			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2711 					       SDHCI_INT_CARD_INSERT;
2712 			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2713 			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2714 
2715 			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2716 				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2717 
2718 			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2719 						       SDHCI_INT_CARD_REMOVE);
2720 			result = IRQ_WAKE_THREAD;
2721 		}
2722 
2723 		if (intmask & SDHCI_INT_CMD_MASK)
2724 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2725 
2726 		if (intmask & SDHCI_INT_DATA_MASK)
2727 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2728 
2729 		if (intmask & SDHCI_INT_BUS_POWER)
2730 			pr_err("%s: Card is consuming too much power!\n",
2731 				mmc_hostname(host->mmc));
2732 
2733 		if (intmask & SDHCI_INT_RETUNE)
2734 			mmc_retune_needed(host->mmc);
2735 
2736 		if ((intmask & SDHCI_INT_CARD_INT) &&
2737 		    (host->ier & SDHCI_INT_CARD_INT)) {
2738 			sdhci_enable_sdio_irq_nolock(host, false);
2739 			host->thread_isr |= SDHCI_INT_CARD_INT;
2740 			result = IRQ_WAKE_THREAD;
2741 		}
2742 
2743 		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2744 			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2745 			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2746 			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2747 
2748 		if (intmask) {
2749 			unexpected |= intmask;
2750 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2751 		}
2752 cont:
2753 		if (result == IRQ_NONE)
2754 			result = IRQ_HANDLED;
2755 
2756 		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2757 	} while (intmask && --max_loops);
2758 out:
2759 	spin_unlock(&host->lock);
2760 
2761 	if (unexpected) {
2762 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2763 			   mmc_hostname(host->mmc), unexpected);
2764 		sdhci_dumpregs(host);
2765 	}
2766 
2767 	return result;
2768 }
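
/*
 * The hard IRQ handler above only acknowledges and classifies interrupt
 * sources; work that may sleep or take long (card-detect debounce via
 * mmc_detect_change(), SDIO callbacks via sdio_run_irqs()) is recorded
 * in host->thread_isr and handed to sdhci_thread_irq() below by
 * returning IRQ_WAKE_THREAD.
 */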
2769 
2770 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2771 {
2772 	struct sdhci_host *host = dev_id;
2773 	unsigned long flags;
2774 	u32 isr;
2775 
2776 	spin_lock_irqsave(&host->lock, flags);
2777 	isr = host->thread_isr;
2778 	host->thread_isr = 0;
2779 	spin_unlock_irqrestore(&host->lock, flags);
2780 
2781 	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2782 		struct mmc_host *mmc = host->mmc;
2783 
2784 		mmc->ops->card_event(mmc);
2785 		mmc_detect_change(mmc, msecs_to_jiffies(200));
2786 	}
2787 
2788 	if (isr & SDHCI_INT_CARD_INT) {
2789 		sdio_run_irqs(host->mmc);
2790 
2791 		spin_lock_irqsave(&host->lock, flags);
2792 		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2793 			sdhci_enable_sdio_irq_nolock(host, true);
2794 		spin_unlock_irqrestore(&host->lock, flags);
2795 	}
2796 
2797 	return isr ? IRQ_HANDLED : IRQ_NONE;
2798 }
2799 
2800 /*****************************************************************************\
2801  *                                                                           *
2802  * Suspend/resume                                                            *
2803  *                                                                           *
2804 \*****************************************************************************/
2805 
2806 #ifdef CONFIG_PM
2807 /*
2808  * To enable wakeup events, the corresponding events have to be enabled in
2809  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2810  * Table' in the SD Host Controller Standard Specification.
2811  * It is useless to restore SDHCI_INT_ENABLE state in
2812  * sdhci_disable_irq_wakeups() since it will be set by
2813  * sdhci_enable_card_detection() or sdhci_init().
2814  */
2815 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2816 {
2817 	u8 val;
2818 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2819 			| SDHCI_WAKE_ON_INT;
2820 	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2821 		      SDHCI_INT_CARD_INT;
2822 
2823 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2824 	val |= mask;
2825 	/* Avoid fake wake up */
2826 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
2827 		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2828 		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2829 	}
2830 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2831 	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
2832 }
2833 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2834 
2835 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2836 {
2837 	u8 val;
2838 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2839 			| SDHCI_WAKE_ON_INT;
2840 
2841 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2842 	val &= ~mask;
2843 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2844 }
2845 
2846 int sdhci_suspend_host(struct sdhci_host *host)
2847 {
2848 	sdhci_disable_card_detection(host);
2849 
2850 	mmc_retune_timer_stop(host->mmc);
2851 
2852 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2853 		host->ier = 0;
2854 		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2855 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2856 		free_irq(host->irq, host);
2857 	} else {
2858 		sdhci_enable_irq_wakeups(host);
2859 		enable_irq_wake(host->irq);
2860 	}
2861 	return 0;
2862 }
2863 
2864 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2865 
2866 int sdhci_resume_host(struct sdhci_host *host)
2867 {
2868 	struct mmc_host *mmc = host->mmc;
2869 	int ret = 0;
2870 
2871 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2872 		if (host->ops->enable_dma)
2873 			host->ops->enable_dma(host);
2874 	}
2875 
2876 	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2877 	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2878 		/* Card keeps power but host controller does not */
2879 		sdhci_init(host, 0);
2880 		host->pwr = 0;
2881 		host->clock = 0;
2882 		mmc->ops->set_ios(mmc, &mmc->ios);
2883 	} else {
2884 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2885 		mmiowb();
2886 	}
2887 
2888 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2889 		ret = request_threaded_irq(host->irq, sdhci_irq,
2890 					   sdhci_thread_irq, IRQF_SHARED,
2891 					   mmc_hostname(host->mmc), host);
2892 		if (ret)
2893 			return ret;
2894 	} else {
2895 		sdhci_disable_irq_wakeups(host);
2896 		disable_irq_wake(host->irq);
2897 	}
2898 
2899 	sdhci_enable_card_detection(host);
2900 
2901 	return ret;
2902 }
2903 
2904 EXPORT_SYMBOL_GPL(sdhci_resume_host);
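
/*
 * Sketch of how a glue driver typically wires these helpers up (assumed
 * names):
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
 */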
2905 
2906 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2907 {
2908 	unsigned long flags;
2909 
2910 	mmc_retune_timer_stop(host->mmc);
2911 
2912 	spin_lock_irqsave(&host->lock, flags);
2913 	host->ier &= SDHCI_INT_CARD_INT;
2914 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2915 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2916 	spin_unlock_irqrestore(&host->lock, flags);
2917 
2918 	synchronize_hardirq(host->irq);
2919 
2920 	spin_lock_irqsave(&host->lock, flags);
2921 	host->runtime_suspended = true;
2922 	spin_unlock_irqrestore(&host->lock, flags);
2923 
2924 	return 0;
2925 }
2926 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2927 
2928 int sdhci_runtime_resume_host(struct sdhci_host *host)
2929 {
2930 	struct mmc_host *mmc = host->mmc;
2931 	unsigned long flags;
2932 	int host_flags = host->flags;
2933 
2934 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2935 		if (host->ops->enable_dma)
2936 			host->ops->enable_dma(host);
2937 	}
2938 
2939 	sdhci_init(host, 0);
2940 
2941 	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
2942 		/* Force clock and power re-program */
2943 		host->pwr = 0;
2944 		host->clock = 0;
2945 		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
2946 		mmc->ops->set_ios(mmc, &mmc->ios);
2947 
2948 		if ((host_flags & SDHCI_PV_ENABLED) &&
2949 		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2950 			spin_lock_irqsave(&host->lock, flags);
2951 			sdhci_enable_preset_value(host, true);
2952 			spin_unlock_irqrestore(&host->lock, flags);
2953 		}
2954 
2955 		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
2956 		    mmc->ops->hs400_enhanced_strobe)
2957 			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
2958 	}
2959 
2960 	spin_lock_irqsave(&host->lock, flags);
2961 
2962 	host->runtime_suspended = false;
2963 
2964 	/* Enable SDIO IRQ */
2965 	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2966 		sdhci_enable_sdio_irq_nolock(host, true);
2967 
2968 	/* Enable Card Detection */
2969 	sdhci_enable_card_detection(host);
2970 
2971 	spin_unlock_irqrestore(&host->lock, flags);
2972 
2973 	return 0;
2974 }
2975 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2976 
2977 #endif /* CONFIG_PM */
2978 
2979 /*****************************************************************************\
2980  *                                                                           *
2981  * Command Queue Engine (CQE) helpers                                        *
2982  *                                                                           *
2983 \*****************************************************************************/
2984 
2985 void sdhci_cqe_enable(struct mmc_host *mmc)
2986 {
2987 	struct sdhci_host *host = mmc_priv(mmc);
2988 	unsigned long flags;
2989 	u8 ctrl;
2990 
2991 	spin_lock_irqsave(&host->lock, flags);
2992 
2993 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2994 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
2995 	if (host->flags & SDHCI_USE_64_BIT_DMA)
2996 		ctrl |= SDHCI_CTRL_ADMA64;
2997 	else
2998 		ctrl |= SDHCI_CTRL_ADMA32;
2999 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3000 
3001 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 512),
3002 		     SDHCI_BLOCK_SIZE);
3003 
3004 	/* Set maximum timeout */
3005 	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3006 
3007 	host->ier = host->cqe_ier;
3008 
3009 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3010 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3011 
3012 	host->cqe_on = true;
3013 
3014 	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3015 		 mmc_hostname(mmc), host->ier,
3016 		 sdhci_readl(host, SDHCI_INT_STATUS));
3017 
3018 	mmiowb();
3019 	spin_unlock_irqrestore(&host->lock, flags);
3020 }
3021 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3022 
3023 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3024 {
3025 	struct sdhci_host *host = mmc_priv(mmc);
3026 	unsigned long flags;
3027 
3028 	spin_lock_irqsave(&host->lock, flags);
3029 
3030 	sdhci_set_default_irqs(host);
3031 
3032 	host->cqe_on = false;
3033 
3034 	if (recovery) {
3035 		sdhci_do_reset(host, SDHCI_RESET_CMD);
3036 		sdhci_do_reset(host, SDHCI_RESET_DATA);
3037 	}
3038 
3039 	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3040 		 mmc_hostname(mmc), host->ier,
3041 		 sdhci_readl(host, SDHCI_INT_STATUS));
3042 
3043 	mmiowb();
3044 	spin_unlock_irqrestore(&host->lock, flags);
3045 }
3046 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3047 
3048 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3049 		   int *data_error)
3050 {
3051 	u32 mask;
3052 
3053 	if (!host->cqe_on)
3054 		return false;
3055 
3056 	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3057 		*cmd_error = -EILSEQ;
3058 	else if (intmask & SDHCI_INT_TIMEOUT)
3059 		*cmd_error = -ETIMEDOUT;
3060 	else
3061 		*cmd_error = 0;
3062 
3063 	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3064 		*data_error = -EILSEQ;
3065 	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3066 		*data_error = -ETIMEDOUT;
3067 	else if (intmask & SDHCI_INT_ADMA_ERROR)
3068 		*data_error = -EIO;
3069 	else
3070 		*data_error = 0;
3071 
3072 	/* Clear selected interrupts. */
3073 	mask = intmask & host->cqe_ier;
3074 	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3075 
3076 	if (intmask & SDHCI_INT_BUS_POWER)
3077 		pr_err("%s: Card is consuming too much power!\n",
3078 		       mmc_hostname(host->mmc));
3079 
3080 	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3081 	if (intmask) {
3082 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3083 		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3084 		       mmc_hostname(host->mmc), intmask);
3085 		sdhci_dumpregs(host);
3086 	}
3087 
3088 	return true;
3089 }
3090 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
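
/*
 * Sketch of the CQE interrupt flow, assuming a glue driver built on the
 * separate cqhci engine driver and its cqhci_irq() helper: the ->irq hook
 * first lets sdhci_cqe_irq() decode and clear the status, then forwards
 * the result to the engine.
 *
 *	static u32 my_cqhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0;
 *		int data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;
 *
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;
 *	}
 */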
3091 
3092 /*****************************************************************************\
3093  *                                                                           *
3094  * Device allocation/registration                                            *
3095  *                                                                           *
3096 \*****************************************************************************/
3097 
3098 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3099 	size_t priv_size)
3100 {
3101 	struct mmc_host *mmc;
3102 	struct sdhci_host *host;
3103 
3104 	WARN_ON(dev == NULL);
3105 
3106 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3107 	if (!mmc)
3108 		return ERR_PTR(-ENOMEM);
3109 
3110 	host = mmc_priv(mmc);
3111 	host->mmc = mmc;
3112 	host->mmc_host_ops = sdhci_ops;
3113 	mmc->ops = &host->mmc_host_ops;
3114 
3115 	host->flags = SDHCI_SIGNALING_330;
3116 
3117 	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3118 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3119 
3120 	host->tuning_delay = -1;
3121 
3122 	return host;
3123 }
3124 
3125 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
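
/*
 * Typical allocation sequence in a glue driver's probe (sketch, assumed
 * names; "struct my_priv" is the private area carved out by priv_size):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *
 *	... set host->ioaddr, host->irq, host->ops and quirks ...
 *
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 */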
3126 
3127 static int sdhci_set_dma_mask(struct sdhci_host *host)
3128 {
3129 	struct mmc_host *mmc = host->mmc;
3130 	struct device *dev = mmc_dev(mmc);
3131 	int ret = -EINVAL;
3132 
3133 	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3134 		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3135 
3136 	/* Try the 64-bit mask if the hardware is capable of it */
3137 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3138 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3139 		if (ret) {
3140 			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3141 				mmc_hostname(mmc));
3142 			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3143 		}
3144 	}
3145 
3146 	/* 32-bit mask as default & fallback */
3147 	if (ret) {
3148 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3149 		if (ret)
3150 			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3151 				mmc_hostname(mmc));
3152 	}
3153 
3154 	return ret;
3155 }
3156 
3157 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3158 {
3159 	u16 v;
3160 	u64 dt_caps_mask = 0;
3161 	u64 dt_caps = 0;
3162 
3163 	if (host->read_caps)
3164 		return;
3165 
3166 	host->read_caps = true;
3167 
3168 	if (debug_quirks)
3169 		host->quirks = debug_quirks;
3170 
3171 	if (debug_quirks2)
3172 		host->quirks2 = debug_quirks2;
3173 
3174 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3175 
3176 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3177 			     "sdhci-caps-mask", &dt_caps_mask);
3178 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3179 			     "sdhci-caps", &dt_caps);
3180 
3181 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3182 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3183 
3184 	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3185 		return;
3186 
3187 	if (caps) {
3188 		host->caps = *caps;
3189 	} else {
3190 		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3191 		host->caps &= ~lower_32_bits(dt_caps_mask);
3192 		host->caps |= lower_32_bits(dt_caps);
3193 	}
3194 
3195 	if (host->version < SDHCI_SPEC_300)
3196 		return;
3197 
3198 	if (caps1) {
3199 		host->caps1 = *caps1;
3200 	} else {
3201 		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3202 		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3203 		host->caps1 |= upper_32_bits(dt_caps);
3204 	}
3205 }
3206 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
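
/*
 * The sdhci-caps-mask/sdhci-caps device-tree properties read above let a
 * board clear or set individual capability bits without a driver quirk.
 * Illustrative fragment (assumed node name) that drops broken DDR50
 * support:
 *
 *	sdhci@12345000 {
 *		...
 *		sdhci-caps-mask = <0x00000004 0x00000000>;
 *	};
 *
 * The first cell masks bits of SDHCI_CAPABILITIES_1 and the second bits
 * of SDHCI_CAPABILITIES, matching the upper/lower_32_bits() split above.
 */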
3207 
3208 int sdhci_setup_host(struct sdhci_host *host)
3209 {
3210 	struct mmc_host *mmc;
3211 	u32 max_current_caps;
3212 	unsigned int ocr_avail;
3213 	unsigned int override_timeout_clk;
3214 	u32 max_clk;
3215 	int ret;
3216 
3217 	WARN_ON(host == NULL);
3218 	if (host == NULL)
3219 		return -EINVAL;
3220 
3221 	mmc = host->mmc;
3222 
3223 	/*
3224 	 * If there are external regulators, get them. Note this must be done
3225 	 * early before resetting the host and reading the capabilities so that
3226 	 * the host can take the appropriate action if regulators are not
3227 	 * available.
3228 	 */
3229 	ret = mmc_regulator_get_supply(mmc);
3230 	if (ret == -EPROBE_DEFER)
3231 		return ret;
3232 
3233 	sdhci_read_caps(host);
3234 
3235 	override_timeout_clk = host->timeout_clk;
3236 
3237 	if (host->version > SDHCI_SPEC_300) {
3238 		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3239 		       mmc_hostname(mmc), host->version);
3240 	}
3241 
3242 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3243 		host->flags |= SDHCI_USE_SDMA;
3244 	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3245 		DBG("Controller doesn't have SDMA capability\n");
3246 	else
3247 		host->flags |= SDHCI_USE_SDMA;
3248 
3249 	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3250 		(host->flags & SDHCI_USE_SDMA)) {
3251 		DBG("Disabling DMA as it is marked broken\n");
3252 		host->flags &= ~SDHCI_USE_SDMA;
3253 	}
3254 
3255 	if ((host->version >= SDHCI_SPEC_200) &&
3256 		(host->caps & SDHCI_CAN_DO_ADMA2))
3257 		host->flags |= SDHCI_USE_ADMA;
3258 
3259 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3260 		(host->flags & SDHCI_USE_ADMA)) {
3261 		DBG("Disabling ADMA as it is marked broken\n");
3262 		host->flags &= ~SDHCI_USE_ADMA;
3263 	}
3264 
3265 	/*
3266 	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3267 	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
3268 	 * that during the first call to ->enable_dma().  Similarly
3269 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3270 	 * implement.
3271 	 */
3272 	if (host->caps & SDHCI_CAN_64BIT)
3273 		host->flags |= SDHCI_USE_64_BIT_DMA;
3274 
3275 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3276 		ret = sdhci_set_dma_mask(host);
3277 
3278 		if (!ret && host->ops->enable_dma)
3279 			ret = host->ops->enable_dma(host);
3280 
3281 		if (ret) {
3282 			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3283 				mmc_hostname(mmc));
3284 			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3285 
3286 			ret = 0;
3287 		}
3288 	}
3289 
3290 	/* SDMA does not support 64-bit DMA */
3291 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3292 		host->flags &= ~SDHCI_USE_SDMA;
3293 
3294 	if (host->flags & SDHCI_USE_ADMA) {
3295 		dma_addr_t dma;
3296 		void *buf;
3297 
3298 		/*
3299 		 * The DMA descriptor table size is calculated as the maximum
3300 		 * number of segments times 2, to allow for an alignment
3301 		 * descriptor for each segment, plus 1 for a nop end descriptor,
3302 		 * all multipled by the descriptor size.
3303 		 */
3304 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
3305 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3306 					      SDHCI_ADMA2_64_DESC_SZ;
3307 			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3308 		} else {
3309 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3310 					      SDHCI_ADMA2_32_DESC_SZ;
3311 			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3312 		}
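
		/*
		 * Worked example (assuming the usual SDHCI_MAX_SEGS of 128
		 * and the 12-byte 64-bit ADMA2 descriptor): the table is
		 * (128 * 2 + 1) * 12 = 3084 bytes, and the align buffer
		 * allocated below adds 128 * SDHCI_ADMA2_ALIGN bytes of
		 * bounce space for unaligned segment edges.
		 */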
3313 
3314 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3315 		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3316 					 host->adma_table_sz, &dma, GFP_KERNEL);
3317 		if (!buf) {
3318 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3319 				mmc_hostname(mmc));
3320 			host->flags &= ~SDHCI_USE_ADMA;
3321 		} else if ((dma + host->align_buffer_sz) &
3322 			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3323 			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3324 				mmc_hostname(mmc));
3325 			host->flags &= ~SDHCI_USE_ADMA;
3326 			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3327 					  host->adma_table_sz, buf, dma);
3328 		} else {
3329 			host->align_buffer = buf;
3330 			host->align_addr = dma;
3331 
3332 			host->adma_table = buf + host->align_buffer_sz;
3333 			host->adma_addr = dma + host->align_buffer_sz;
3334 		}
3335 	}
3336 
3337 	/*
3338 	 * If we use DMA, then it's up to the caller to set the DMA
3339 	 * mask, but PIO does not need the hw shim so we set a new
3340 	 * mask here in that case.
3341 	 */
3342 	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3343 		host->dma_mask = DMA_BIT_MASK(64);
3344 		mmc_dev(mmc)->dma_mask = &host->dma_mask;
3345 	}
3346 
3347 	if (host->version >= SDHCI_SPEC_300)
3348 		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3349 			>> SDHCI_CLOCK_BASE_SHIFT;
3350 	else
3351 		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3352 			>> SDHCI_CLOCK_BASE_SHIFT;
3353 
3354 	host->max_clk *= 1000000;
3355 	if (host->max_clk == 0 || host->quirks &
3356 			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3357 		if (!host->ops->get_max_clock) {
3358 			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3359 			       mmc_hostname(mmc));
3360 			ret = -ENODEV;
3361 			goto undma;
3362 		}
3363 		host->max_clk = host->ops->get_max_clock(host);
3364 	}
3365 
3366 	/*
3367 	 * In case of Host Controller v3.00, find out whether clock
3368 	 * multiplier is supported.
3369 	 */
3370 	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3371 			SDHCI_CLOCK_MUL_SHIFT;
3372 
3373 	/*
3374 	 * In case the value in Clock Multiplier is 0, then programmable
3375 	 * clock mode is not supported, otherwise the actual clock
3376 	 * multiplier is one more than the value of Clock Multiplier
3377 	 * in the Capabilities Register.
3378 	 */
3379 	if (host->clk_mul)
3380 		host->clk_mul += 1;
3381 
3382 	/*
3383 	 * Set host parameters.
3384 	 */
3385 	max_clk = host->max_clk;
3386 
3387 	if (host->ops->get_min_clock)
3388 		mmc->f_min = host->ops->get_min_clock(host);
3389 	else if (host->version >= SDHCI_SPEC_300) {
3390 		if (host->clk_mul) {
3391 			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3392 			max_clk = host->max_clk * host->clk_mul;
3393 		} else
3394 			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3395 	} else
3396 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3397 
3398 	if (!mmc->f_max || mmc->f_max > max_clk)
3399 		mmc->f_max = max_clk;
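
	/*
	 * Worked example for the f_min/f_max computation above (assumed
	 * numbers): with a 100 MHz base clock and a Clock Multiplier field
	 * of 7 (so clk_mul == 8), programmable clock mode gives
	 * f_min = (100 MHz * 8) / 1024 ~= 781 kHz and f_max = 800 MHz;
	 * without a multiplier, f_min is the base clock divided by
	 * SDHCI_MAX_DIV_SPEC_300 (2046).
	 */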
3400 
3401 	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3402 		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3403 					SDHCI_TIMEOUT_CLK_SHIFT;
3404 
3405 		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3406 			host->timeout_clk *= 1000;
3407 
3408 		if (host->timeout_clk == 0) {
3409 			if (!host->ops->get_timeout_clock) {
3410 				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3411 					mmc_hostname(mmc));
3412 				ret = -ENODEV;
3413 				goto undma;
3414 			}
3415 
3416 			host->timeout_clk =
3417 				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3418 					     1000);
3419 		}
3420 
3421 		if (override_timeout_clk)
3422 			host->timeout_clk = override_timeout_clk;
3423 
3424 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3425 			host->ops->get_max_timeout_count(host) : 1 << 27;
3426 		mmc->max_busy_timeout /= host->timeout_clk;
3427 	}
3428 
3429 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3430 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3431 
3432 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3433 		host->flags |= SDHCI_AUTO_CMD12;
3434 
3435 	/* Auto-CMD23 stuff only works in ADMA or PIO. */
3436 	if ((host->version >= SDHCI_SPEC_300) &&
3437 	    ((host->flags & SDHCI_USE_ADMA) ||
3438 	     !(host->flags & SDHCI_USE_SDMA)) &&
3439 	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3440 		host->flags |= SDHCI_AUTO_CMD23;
3441 		DBG("Auto-CMD23 available\n");
3442 	} else {
3443 		DBG("Auto-CMD23 unavailable\n");
3444 	}
3445 
3446 	/*
3447 	 * A controller may support 8-bit width, but the board itself
3448 	 * might not have the pins brought out.  Boards that support
3449 	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3450 	 * their platform code before calling sdhci_add_host(), and we
3451 	 * won't assume 8-bit width for hosts without that CAP.
3452 	 */
3453 	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3454 		mmc->caps |= MMC_CAP_4_BIT_DATA;
3455 
3456 	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3457 		mmc->caps &= ~MMC_CAP_CMD23;
3458 
3459 	if (host->caps & SDHCI_CAN_DO_HISPD)
3460 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3461 
3462 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3463 	    mmc_card_is_removable(mmc) &&
3464 	    mmc_gpio_get_cd(host->mmc) < 0)
3465 		mmc->caps |= MMC_CAP_NEEDS_POLL;
3466 
3467 	/* If the vqmmc regulator can't do 1.8V signalling, there is no UHS */
3468 	if (!IS_ERR(mmc->supply.vqmmc)) {
3469 		ret = regulator_enable(mmc->supply.vqmmc);
3470 		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3471 						    1950000))
3472 			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3473 					 SDHCI_SUPPORT_SDR50 |
3474 					 SDHCI_SUPPORT_DDR50);
3475 		if (ret) {
3476 			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3477 				mmc_hostname(mmc), ret);
3478 			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3479 		}
3480 	}
3481 
3482 	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3483 		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3484 				 SDHCI_SUPPORT_DDR50);
3485 	}
3486 
3487 	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3488 	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3489 			   SDHCI_SUPPORT_DDR50))
3490 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3491 
3492 	/* SDR104 support also implies SDR50 support */
3493 	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3494 		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3495 		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
3496 		 * field can be promoted to support HS200.
3497 		 */
3498 		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3499 			mmc->caps2 |= MMC_CAP2_HS200;
3500 	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3501 		mmc->caps |= MMC_CAP_UHS_SDR50;
3502 	}
3503 
3504 	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3505 	    (host->caps1 & SDHCI_SUPPORT_HS400))
3506 		mmc->caps2 |= MMC_CAP2_HS400;
3507 
3508 	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3509 	    (IS_ERR(mmc->supply.vqmmc) ||
3510 	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3511 					     1300000)))
3512 		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3513 
3514 	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3515 	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3516 		mmc->caps |= MMC_CAP_UHS_DDR50;
3517 
3518 	/* Does the host need tuning for SDR50? */
3519 	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3520 		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3521 
3522 	/* Driver Type(s) (A, C, D) supported by the host */
3523 	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3524 		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3525 	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3526 		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3527 	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3528 		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3529 
3530 	/* Initial value for re-tuning timer count */
3531 	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3532 			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3533 
3534 	/*
3535 	 * In case Re-tuning Timer is not disabled, the actual value of
3536 	 * re-tuning timer will be 2 ^ (n - 1).
3537 	 */
3538 	if (host->tuning_count)
3539 		host->tuning_count = 1 << (host->tuning_count - 1);
3540 
3541 	/* Re-tuning mode supported by the Host Controller */
3542 	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3543 			     SDHCI_RETUNING_MODE_SHIFT;
3544 
3545 	ocr_avail = 0;
3546 
3547 	/*
3548 	 * According to SD Host Controller spec v3.00, if the Host System
3549 	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3550 	 * the value is meaningful only if Voltage Support in the Capabilities
3551 	 * register is set. The actual current value is 4 times the register
3552 	 * value.
3553 	 */
3554 	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3555 	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3556 		int curr = regulator_get_current_limit(mmc->supply.vmmc);
3557 		if (curr > 0) {
3558 
3559 			/* convert to SDHCI_MAX_CURRENT format */
3560 			curr = curr/1000;  /* convert to mA */
3561 			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3562 
3563 			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3564 			max_current_caps =
3565 				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3566 				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3567 				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
3568 		}
3569 	}
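
	/*
	 * Example of the conversion above (assumed numbers): a vmmc
	 * regulator limited to 800000 uA gives curr = 800 / 4 = 200, so
	 * each voltage slot in max_current_caps holds 200; the code below
	 * multiplies that field by SDHCI_MAX_CURRENT_MULTIPLIER (4) again,
	 * yielding mmc->max_current_* values of 800 mA.
	 */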
3570 
3571 	if (host->caps & SDHCI_CAN_VDD_330) {
3572 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3573 
3574 		mmc->max_current_330 = ((max_current_caps &
3575 				   SDHCI_MAX_CURRENT_330_MASK) >>
3576 				   SDHCI_MAX_CURRENT_330_SHIFT) *
3577 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3578 	}
3579 	if (host->caps & SDHCI_CAN_VDD_300) {
3580 		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3581 
3582 		mmc->max_current_300 = ((max_current_caps &
3583 				   SDHCI_MAX_CURRENT_300_MASK) >>
3584 				   SDHCI_MAX_CURRENT_300_SHIFT) *
3585 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3586 	}
3587 	if (host->caps & SDHCI_CAN_VDD_180) {
3588 		ocr_avail |= MMC_VDD_165_195;
3589 
3590 		mmc->max_current_180 = ((max_current_caps &
3591 				   SDHCI_MAX_CURRENT_180_MASK) >>
3592 				   SDHCI_MAX_CURRENT_180_SHIFT) *
3593 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3594 	}
3595 
3596 	/* If OCR set by host, use it instead. */
3597 	if (host->ocr_mask)
3598 		ocr_avail = host->ocr_mask;
3599 
3600 	/* If OCR set by external regulators, give it highest prio. */
3601 	if (mmc->ocr_avail)
3602 		ocr_avail = mmc->ocr_avail;
3603 
3604 	mmc->ocr_avail = ocr_avail;
3605 	mmc->ocr_avail_sdio = ocr_avail;
3606 	if (host->ocr_avail_sdio)
3607 		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3608 	mmc->ocr_avail_sd = ocr_avail;
3609 	if (host->ocr_avail_sd)
3610 		mmc->ocr_avail_sd &= host->ocr_avail_sd;
3611 	else /* normal SD controllers don't support 1.8V */
3612 		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3613 	mmc->ocr_avail_mmc = ocr_avail;
3614 	if (host->ocr_avail_mmc)
3615 		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3616 
3617 	if (mmc->ocr_avail == 0) {
3618 		pr_err("%s: Hardware doesn't report any supported voltages.\n",
3619 		       mmc_hostname(mmc));
3620 		ret = -ENODEV;
3621 		goto unreg;
3622 	}
3623 
3624 	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3625 			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3626 			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3627 	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3628 		host->flags |= SDHCI_SIGNALING_180;
3629 
3630 	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3631 		host->flags |= SDHCI_SIGNALING_120;
3632 
3633 	spin_lock_init(&host->lock);
3634 
3635 	/*
3636 	 * Maximum number of segments. Depends on if the hardware
3637 	 * can do scatter/gather or not.
3638 	 */
3639 	if (host->flags & SDHCI_USE_ADMA)
3640 		mmc->max_segs = SDHCI_MAX_SEGS;
3641 	else if (host->flags & SDHCI_USE_SDMA)
3642 		mmc->max_segs = 1;
3643 	else /* PIO */
3644 		mmc->max_segs = SDHCI_MAX_SEGS;
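	/*
	 * PIO iterates the scatterlist in software, so it has no hardware
	 * descriptor limit; plain SDMA can only address a single contiguous
	 * buffer per request, hence the single segment.
	 */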
3645 
3646 	/*
3647 	 * Maximum number of sectors in one transfer. Limited by the SDMA
3648 	 * boundary size (512KiB). Note that some tuning modes impose a 4MiB
3649 	 * limit, but the 512KiB SDMA limit is lower anyway.
3650 	 */
3651 	mmc->max_req_size = 524288;
3652 
3653 	/*
3654 	 * Maximum segment size. Could be one segment with the maximum number
3655 	 * of bytes. When doing hardware scatter/gather, each entry cannot
3656 	 * be larger than 64 KiB though.
3657 	 */
3658 	if (host->flags & SDHCI_USE_ADMA) {
3659 		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3660 			mmc->max_seg_size = 65535;
3661 		else
3662 			mmc->max_seg_size = 65536;
3663 	} else {
3664 		mmc->max_seg_size = mmc->max_req_size;
3665 	}
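	/*
	 * The ADMA2 descriptor length field is 16 bits wide, with 0 normally
	 * meaning 65536 bytes; controllers whose zero-length descriptors are
	 * broken therefore have to cap segments at 65535 bytes.
	 */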
3666 
3667 	/*
3668 	 * Maximum block size. This varies from controller to controller and
3669 	 * is specified in the capabilities register.
3670 	 */
3671 	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3672 		mmc->max_blk_size = 2;
3673 	} else {
3674 		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3675 				SDHCI_MAX_BLOCK_SHIFT;
3676 		if (mmc->max_blk_size >= 3) {
3677 			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3678 				mmc_hostname(mmc));
3679 			mmc->max_blk_size = 0;
3680 		}
3681 	}
3682 
3683 	mmc->max_blk_size = 512 << mmc->max_blk_size;
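	/*
	 * Worked example: a Capabilities field value of 2 gives
	 * 512 << 2 = 2048 bytes, which is why the
	 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 path above simply forces the encoded
	 * value to 2; encoded values of 3 and above are reserved, hence the
	 * fallback to 0 (512 bytes).
	 */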
3684 
3685 	/*
3686 	 * Maximum block count.
3687 	 */
3688 	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
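	/* The Block Count register is 16 bits wide, hence the 65535 cap. */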
3689 
3690 	return 0;
3691 
3692 unreg:
3693 	if (!IS_ERR(mmc->supply.vqmmc))
3694 		regulator_disable(mmc->supply.vqmmc);
3695 undma:
3696 	if (host->align_buffer)
3697 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3698 				  host->adma_table_sz, host->align_buffer,
3699 				  host->align_addr);
3700 	host->adma_table = NULL;
3701 	host->align_buffer = NULL;
3702 
3703 	return ret;
3704 }
3705 EXPORT_SYMBOL_GPL(sdhci_setup_host);
3706 
3707 void sdhci_cleanup_host(struct sdhci_host *host)
3708 {
3709 	struct mmc_host *mmc = host->mmc;
3710 
3711 	if (!IS_ERR(mmc->supply.vqmmc))
3712 		regulator_disable(mmc->supply.vqmmc);
3713 
3714 	if (host->align_buffer)
3715 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3716 				  host->adma_table_sz, host->align_buffer,
3717 				  host->align_addr);
3718 	host->adma_table = NULL;
3719 	host->align_buffer = NULL;
3720 }
3721 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
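
/*
 * Illustrative sketch (not part of this file): platform glue drivers that
 * must adjust capabilities, clocks or DMA masks between allocation and
 * registration typically split the steps that sdhci_add_host() performs in
 * one go:
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	... vendor-specific fixups to mmc->caps, DMA setup, etc. ...
 *
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *	return ret;
 */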
3722 
3723 int __sdhci_add_host(struct sdhci_host *host)
3724 {
3725 	struct mmc_host *mmc = host->mmc;
3726 	int ret;
3727 
3728 	/* Init the finish tasklet. */
3731 	tasklet_init(&host->finish_tasklet,
3732 		sdhci_tasklet_finish, (unsigned long)host);
3733 
3734 	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3735 	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
3736 		    (unsigned long)host);
3737 
3738 	init_waitqueue_head(&host->buf_ready_int);
3739 
3740 	sdhci_init(host, 0);
3741 
3742 	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3743 				   IRQF_SHARED, mmc_hostname(mmc), host);
3744 	if (ret) {
3745 		pr_err("%s: Failed to request IRQ %d: %d\n",
3746 		       mmc_hostname(mmc), host->irq, ret);
3747 		goto untasklet;
3748 	}
3749 
3750 #ifdef CONFIG_MMC_DEBUG
3751 	sdhci_dumpregs(host);
3752 #endif
3753 
3754 	ret = sdhci_led_register(host);
3755 	if (ret) {
3756 		pr_err("%s: Failed to register LED device: %d\n",
3757 		       mmc_hostname(mmc), ret);
3758 		goto unirq;
3759 	}
3760 
3761 	mmiowb();
3762 
3763 	ret = mmc_add_host(mmc);
3764 	if (ret)
3765 		goto unled;
3766 
3767 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3768 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3769 		(host->flags & SDHCI_USE_ADMA) ?
3770 		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3771 		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3772 
3773 	sdhci_enable_card_detection(host);
3774 
3775 	return 0;
3776 
3777 unled:
3778 	sdhci_led_unregister(host);
3779 unirq:
3780 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3781 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3782 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3783 	free_irq(host->irq, host);
3784 untasklet:
3785 	tasklet_kill(&host->finish_tasklet);
3786 
3787 	return ret;
3788 }
3789 EXPORT_SYMBOL_GPL(__sdhci_add_host);
3790 
3791 int sdhci_add_host(struct sdhci_host *host)
3792 {
3793 	int ret;
3794 
3795 	ret = sdhci_setup_host(host);
3796 	if (ret)
3797 		return ret;
3798 
3799 	ret = __sdhci_add_host(host);
3800 	if (ret)
3801 		goto cleanup;
3802 
3803 	return 0;
3804 
3805 cleanup:
3806 	sdhci_cleanup_host(host);
3807 
3808 	return ret;
3809 }
3810 EXPORT_SYMBOL_GPL(sdhci_add_host);
3811 
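/*
 * Remove the host controller. A non-zero 'dead' argument means the
 * controller is assumed to be unreachable (e.g. surprise removal):
 * outstanding requests are errored out with -ENOMEDIUM and the final
 * hardware reset is skipped.
 */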
3812 void sdhci_remove_host(struct sdhci_host *host, int dead)
3813 {
3814 	struct mmc_host *mmc = host->mmc;
3815 	unsigned long flags;
3816 
3817 	if (dead) {
3818 		spin_lock_irqsave(&host->lock, flags);
3819 
3820 		host->flags |= SDHCI_DEVICE_DEAD;
3821 
3822 		if (sdhci_has_requests(host)) {
3823 			pr_err("%s: Controller removed during transfer!\n",
3824 				mmc_hostname(mmc));
3825 			sdhci_error_out_mrqs(host, -ENOMEDIUM);
3826 		}
3827 
3828 		spin_unlock_irqrestore(&host->lock, flags);
3829 	}
3830 
3831 	sdhci_disable_card_detection(host);
3832 
3833 	mmc_remove_host(mmc);
3834 
3835 	sdhci_led_unregister(host);
3836 
3837 	if (!dead)
3838 		sdhci_do_reset(host, SDHCI_RESET_ALL);
3839 
3840 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3841 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3842 	free_irq(host->irq, host);
3843 
3844 	del_timer_sync(&host->timer);
3845 	del_timer_sync(&host->data_timer);
3846 
3847 	tasklet_kill(&host->finish_tasklet);
3848 
3849 	if (!IS_ERR(mmc->supply.vqmmc))
3850 		regulator_disable(mmc->supply.vqmmc);
3851 
3852 	if (host->align_buffer)
3853 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3854 				  host->adma_table_sz, host->align_buffer,
3855 				  host->align_addr);
3856 
3857 	host->adma_table = NULL;
3858 	host->align_buffer = NULL;
3859 }
3861 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3862 
3863 void sdhci_free_host(struct sdhci_host *host)
3864 {
3865 	mmc_free_host(host->mmc);
3866 }
3868 EXPORT_SYMBOL_GPL(sdhci_free_host);
3869 
3870 /*****************************************************************************\
3871  *                                                                           *
3872  * Driver init/exit                                                          *
3873  *                                                                           *
3874 \*****************************************************************************/
3875 
3876 static int __init sdhci_drv_init(void)
3877 {
3878 	pr_info(DRIVER_NAME
3879 		": Secure Digital Host Controller Interface driver\n");
3880 	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3881 
3882 	return 0;
3883 }
3884 
3885 static void __exit sdhci_drv_exit(void)
3886 {
3887 }
3888 
3889 module_init(sdhci_drv_init);
3890 module_exit(sdhci_drv_exit);
3891 
3892 module_param(debug_quirks, uint, 0444);
3893 module_param(debug_quirks2, uint, 0444);
3894 
3895 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3896 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3897 MODULE_LICENSE("GPL");
3898 
3899 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3900 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
3901