/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
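
/*
 * Note on the two PIO helpers above: all PIO data moves through the
 * 32-bit Buffer Data Port register (SDHCI_BUFFER), least significant
 * byte first. A minimal sketch of the per-word unpacking done by
 * sdhci_read_block_pio() (illustrative only):
 *
 *	scratch = sdhci_readl(host, SDHCI_BUFFER);
 *	buf[0] = scratch & 0xFF;		// byte 0
 *	buf[1] = (scratch >> 8) & 0xFF;		// byte 1, and so on
 *
 * so a 512-byte block is drained with 128 register reads, with writes
 * mirroring the same packing in the opposite direction.
 */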

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      mmc_get_dma_dir(data));

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
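
/*
 * For reference, a hedged sketch of the ADMA2 descriptor layout assumed
 * above (field names follow this driver's struct sdhci_adma2_64_desc):
 *
 *	[15:0]	cmd     - attribute bits, e.g. ADMA2_TRAN_VALID for a
 *	                  "transfer data, descriptor valid" entry
 *	[31:16]	len     - transfer length in bytes
 *	[63:32]	addr_lo - lower 32 bits of the DMA address
 *	[95:64]	addr_hi - upper 32 bits (64-bit descriptors only)
 *
 * So a valid 512-byte transfer entry for DMA address A would be built
 * with sdhci_adma_write_desc(host, desc, A, 512, ADMA2_TRAN_VALID).
 */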

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
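
/*
 * Worked example for sdhci_calc_timeout() (illustrative numbers): with
 * host->timeout_clk = 1000 (kHz), the count = 0 baseline is
 * (1 << 13) * 1000 / 1000 = 8192 us. A request with a 100 ms
 * (100000 us) target then needs four doublings, since
 * 8192 << 4 = 131072 us >= 100000 us, so count = 4, which programs a
 * hardware timeout of 2^(13 + 4) timeout-clock cycles.
 */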

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
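
/*
 * A note on the block size register write above: SDHCI_MAKE_BLKSZ()
 * packs the SDMA buffer boundary into bits [14:12] and the block size
 * into bits [11:0]. Assuming the default boundary argument of 0x7
 * (512K boundary) and 512-byte blocks, the register value would be
 * (0x7 << 12) | 512 = 0x7200. This is a sketch of the encoding; see
 * SDHCI_MAKE_BLKSZ in sdhci.h for the definitive macro.
 */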

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
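
/*
 * Example (illustrative, using the SDHCI_TRNS_* names from sdhci.h):
 * a multi-block DMA read with Auto-CMD12 ends up with
 * mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI | SDHCI_TRNS_READ |
 *        SDHCI_TRNS_DMA | SDHCI_TRNS_AUTO_CMD12,
 * i.e. block count enabled, multi-block, card-to-host, DMA, with the
 * controller issuing CMD12 itself on completion.
 */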

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;
	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
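
/*
 * For illustration, SDHCI_MAKE_CMD() packs the opcode into bits [13:8]
 * and the flags into the low byte of the Command register. A single
 * block read (CMD17, R1 response) would be sent as roughly
 * SDHCI_MAKE_CMD(17, SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC |
 * SDHCI_CMD_INDEX | SDHCI_CMD_DATA): short response with CRC and index
 * checking, data present. This is a sketch, not an exhaustive
 * description of the register.
 */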

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
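	/*
	 * Sketch of why: the controller presents the 136-bit response
	 * without the CRC byte, one byte "lower" than the layout the
	 * MMC core parses, so each word is shifted left by 8 bits and
	 * topped up with the high byte of the following word.
	 */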
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
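
/*
 * Divider example (illustrative): for a v3.00 host with
 * host->max_clk = 200 MHz and a requested clock of 50 MHz, the loop
 * above picks div = 4 (200 MHz / 4 = 50 MHz <= 50 MHz), reports
 * *actual_clock = 50 MHz, and programs div >> 1 = 2 into the 10-bit
 * divider field, split across SDHCI_DIVIDER_SHIFT and
 * SDHCI_DIVIDER_HI_SHIFT as done at clock_set above.
 */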

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before
		 * they can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		     ios->timing == MMC_TIMING_MMC_HS ||
		     ios->timing == MMC_TIMING_MMC_HS400 ||
		     ios->timing == MMC_TIMING_MMC_HS200 ||
		     ios->timing == MMC_TIMING_MMC_DDR52 ||
		     ios->timing == MMC_TIMING_UHS_SDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR104 ||
		     ios->timing == MMC_TIMING_UHS_DDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

1909 		pr_warn("%s: 3.3V regulator output did not become stable\n",
1910 			mmc_hostname(mmc));
1911 
1912 		return -EAGAIN;
1913 	case MMC_SIGNAL_VOLTAGE_180:
1914 		if (!(host->flags & SDHCI_SIGNALING_180))
1915 			return -EINVAL;
1916 		if (!IS_ERR(mmc->supply.vqmmc)) {
1917 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1918 			if (ret) {
1919 				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1920 					mmc_hostname(mmc));
1921 				return -EIO;
1922 			}
1923 		}
1924 
1925 		/*
1926 		 * Enable 1.8V Signal Enable in the Host Control2
1927 		 * register
1928 		 */
1929 		ctrl |= SDHCI_CTRL_VDD_180;
1930 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1931 
1932 		/* Some controllers need to do more when switching */
1933 		if (host->ops->voltage_switch)
1934 			host->ops->voltage_switch(host);
1935 
1936 		/* 1.8V regulator output should be stable within 5 ms */
1937 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1938 		if (ctrl & SDHCI_CTRL_VDD_180)
1939 			return 0;
1940 
1941 		pr_warn("%s: 1.8V regulator output did not become stable\n",
1942 			mmc_hostname(mmc));
1943 
1944 		return -EAGAIN;
1945 	case MMC_SIGNAL_VOLTAGE_120:
1946 		if (!(host->flags & SDHCI_SIGNALING_120))
1947 			return -EINVAL;
1948 		if (!IS_ERR(mmc->supply.vqmmc)) {
1949 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1950 			if (ret) {
1951 				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1952 					mmc_hostname(mmc));
1953 				return -EIO;
1954 			}
1955 		}
1956 		return 0;
1957 	default:
1958 		/* No signal voltage switch required */
1959 		return 0;
1960 	}
1961 }
1962 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
1963 
1964 static int sdhci_card_busy(struct mmc_host *mmc)
1965 {
1966 	struct sdhci_host *host = mmc_priv(mmc);
1967 	u32 present_state;
1968 
1969 	/* Check whether DAT[0] is 0 */
1970 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1971 
1972 	return !(present_state & SDHCI_DATA_0_LVL_MASK);
1973 }
1974 
1975 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
1976 {
1977 	struct sdhci_host *host = mmc_priv(mmc);
1978 	unsigned long flags;
1979 
1980 	spin_lock_irqsave(&host->lock, flags);
1981 	host->flags |= SDHCI_HS400_TUNING;
1982 	spin_unlock_irqrestore(&host->lock, flags);
1983 
1984 	return 0;
1985 }
1986 
1987 static void sdhci_start_tuning(struct sdhci_host *host)
1988 {
1989 	u16 ctrl;
1990 
1991 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1992 	ctrl |= SDHCI_CTRL_EXEC_TUNING;
1993 	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
1994 		ctrl |= SDHCI_CTRL_TUNED_CLK;
1995 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1996 
1997 	/*
1998 	 * As per the Host Controller spec v3.00, tuning command
1999 	 * generates Buffer Read Ready interrupt, so enable that.
2000 	 *
2001 	 * Note: The spec clearly says that when tuning sequence
2002 	 * is being performed, the controller does not generate
2003 	 * interrupts other than Buffer Read Ready interrupt. But
2004 	 * to make sure we don't hit a controller bug, we _only_
2005 	 * enable Buffer Read Ready interrupt here.
2006 	 */
2007 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2008 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2009 }
2010 
2011 static void sdhci_end_tuning(struct sdhci_host *host)
2012 {
2013 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2014 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2015 }
2016 
2017 static void sdhci_reset_tuning(struct sdhci_host *host)
2018 {
2019 	u16 ctrl;
2020 
2021 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2022 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2023 	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2024 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2025 }
2026 
2027 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2028 {
2029 	sdhci_reset_tuning(host);
2030 
2031 	sdhci_do_reset(host, SDHCI_RESET_CMD);
2032 	sdhci_do_reset(host, SDHCI_RESET_DATA);
2033 
2034 	sdhci_end_tuning(host);
2035 
2036 	mmc_abort_tuning(host->mmc, opcode);
2037 }
2038 
2039 /*
2040  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2041  * tuning command does not have a data payload (or rather the hardware does it
2042  * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2043  * interrupt setup is different to other commands and there is no timeout
2044  * interrupt so special handling is needed.
2045  */
2046 static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2047 {
2048 	struct mmc_host *mmc = host->mmc;
2049 	struct mmc_command cmd = {};
2050 	struct mmc_request mrq = {};
2051 	unsigned long flags;
2052 	u32 b = host->sdma_boundary;
2053 
2054 	spin_lock_irqsave(&host->lock, flags);
2055 
2056 	cmd.opcode = opcode;
2057 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2058 	cmd.mrq = &mrq;
2059 
2060 	mrq.cmd = &cmd;
2061 	/*
2062 	 * In response to CMD19 the card sends a 64-byte tuning block;
2063 	 * for CMD21 on an 8-bit bus the block is 128 bytes. Set the
2064 	 * block size accordingly.
2065 	 */
2066 	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2067 	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2068 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2069 	else
2070 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2071 
2072 	/*
2073 	 * The tuning block is sent by the card to the host controller.
2074 	 * So we set the TRNS_READ bit in the Transfer Mode register.
2075 	 * This also takes care of setting DMA Enable and Multi Block
2076 	 * Select in the same register to 0.
2077 	 */
2078 	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2079 
2080 	sdhci_send_command(host, &cmd);
2081 
2082 	host->cmd = NULL;
2083 
2084 	sdhci_del_timer(host, &mrq);
2085 
2086 	host->tuning_done = 0;
2087 
2088 	mmiowb();
2089 	spin_unlock_irqrestore(&host->lock, flags);
2090 
2091 	/* Wait for Buffer Read Ready interrupt */
2092 	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2093 			   msecs_to_jiffies(50));
2094 
2095 }
2096 
2097 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2098 {
2099 	int i;
2100 
2101 	/*
2102 	 * Issue the tuning opcode repeatedly until Execute Tuning is cleared
2103 	 * or the number of loops reaches MAX_TUNING_LOOP (40).
2104 	 */
2105 	for (i = 0; i < MAX_TUNING_LOOP; i++) {
2106 		u16 ctrl;
2107 
2108 		sdhci_send_tuning(host, opcode);
2109 
2110 		if (!host->tuning_done) {
2111 			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2112 				mmc_hostname(host->mmc));
2113 			sdhci_abort_tuning(host, opcode);
2114 			return;
2115 		}
2116 
2117 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2118 		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2119 			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2120 				return; /* Success! */
2121 			break;
2122 		}
2123 
2124 		/* Spec does not require a delay between tuning cycles */
2125 		if (host->tuning_delay > 0)
2126 			mdelay(host->tuning_delay);
2127 	}
2128 
2129 	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2130 		mmc_hostname(host->mmc));
2131 	sdhci_reset_tuning(host);
2132 }
2133 
2134 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2135 {
2136 	struct sdhci_host *host = mmc_priv(mmc);
2137 	int err = 0;
2138 	unsigned int tuning_count = 0;
2139 	bool hs400_tuning;
2140 
2141 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2142 
2143 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2144 		tuning_count = host->tuning_count;
2145 
2146 	/*
2147 	 * The Host Controller needs tuning for the SDR104 and DDR50
2148 	 * modes, and for SDR50 mode when Use Tuning for SDR50 is set in
2149 	 * the Capabilities register.
2150 	 * If the Host Controller supports the HS200 mode then the
2151 	 * tuning function has to be executed.
2152 	 */
2153 	switch (host->timing) {
2154 	/* HS400 tuning is done in HS200 mode */
2155 	case MMC_TIMING_MMC_HS400:
2156 		err = -EINVAL;
2157 		goto out;
2158 
2159 	case MMC_TIMING_MMC_HS200:
2160 		/*
2161 		 * Periodic re-tuning for HS400 is not expected to be needed, so
2162 		 * disable it here.
2163 		 */
2164 		if (hs400_tuning)
2165 			tuning_count = 0;
2166 		break;
2167 
2168 	case MMC_TIMING_UHS_SDR104:
2169 	case MMC_TIMING_UHS_DDR50:
2170 		break;
2171 
2172 	case MMC_TIMING_UHS_SDR50:
2173 		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2174 			break;
2175 		/* FALLTHROUGH */
2176 
2177 	default:
2178 		goto out;
2179 	}
2180 
2181 	if (host->ops->platform_execute_tuning) {
2182 		err = host->ops->platform_execute_tuning(host, opcode);
2183 		goto out;
2184 	}
2185 
2186 	host->mmc->retune_period = tuning_count;
2187 
2188 	if (host->tuning_delay < 0)
2189 		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2190 
2191 	sdhci_start_tuning(host);
2192 
2193 	__sdhci_execute_tuning(host, opcode);
2194 
2195 	sdhci_end_tuning(host);
2196 out:
2197 	host->flags &= ~SDHCI_HS400_TUNING;
2198 
2199 	return err;
2200 }
2201 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2202 
2203 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2204 {
2205 	/* Host Controller v3.00 defines preset value registers */
2206 	if (host->version < SDHCI_SPEC_300)
2207 		return;
2208 
2209 	/*
2210 	 * Only touch the Preset Value enable bit if the requested state
2211 	 * differs from the current one; otherwise there is nothing to do.
2212 	 */
2213 	if (host->preset_enabled != enable) {
2214 		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2215 
2216 		if (enable)
2217 			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2218 		else
2219 			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2220 
2221 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2222 
2223 		if (enable)
2224 			host->flags |= SDHCI_PV_ENABLED;
2225 		else
2226 			host->flags &= ~SDHCI_PV_ENABLED;
2227 
2228 		host->preset_enabled = enable;
2229 	}
2230 }
2231 
2232 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2233 				int err)
2234 {
2235 	struct sdhci_host *host = mmc_priv(mmc);
2236 	struct mmc_data *data = mrq->data;
2237 
2238 	if (data->host_cookie != COOKIE_UNMAPPED)
2239 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2240 			     mmc_get_dma_dir(data));
2241 
2242 	data->host_cookie = COOKIE_UNMAPPED;
2243 }
2244 
2245 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2246 {
2247 	struct sdhci_host *host = mmc_priv(mmc);
2248 
2249 	mrq->data->host_cookie = COOKIE_UNMAPPED;
2250 
2251 	if (host->flags & SDHCI_REQ_USE_DMA)
2252 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2253 }
2254 
2255 static inline bool sdhci_has_requests(struct sdhci_host *host)
2256 {
2257 	return host->cmd || host->data_cmd;
2258 }
2259 
2260 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2261 {
2262 	if (host->data_cmd) {
2263 		host->data_cmd->error = err;
2264 		sdhci_finish_mrq(host, host->data_cmd->mrq);
2265 	}
2266 
2267 	if (host->cmd) {
2268 		host->cmd->error = err;
2269 		sdhci_finish_mrq(host, host->cmd->mrq);
2270 	}
2271 }
2272 
2273 static void sdhci_card_event(struct mmc_host *mmc)
2274 {
2275 	struct sdhci_host *host = mmc_priv(mmc);
2276 	unsigned long flags;
2277 	int present;
2278 
2279 	/* First check if client has provided their own card event */
2280 	if (host->ops->card_event)
2281 		host->ops->card_event(host);
2282 
2283 	present = mmc->ops->get_cd(mmc);
2284 
2285 	spin_lock_irqsave(&host->lock, flags);
2286 
2287 	/* Check sdhci_has_requests() first in case we are runtime suspended */
2288 	if (sdhci_has_requests(host) && !present) {
2289 		pr_err("%s: Card removed during transfer!\n",
2290 			mmc_hostname(host->mmc));
2291 		pr_err("%s: Resetting controller.\n",
2292 			mmc_hostname(host->mmc));
2293 
2294 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2295 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2296 
2297 		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2298 	}
2299 
2300 	spin_unlock_irqrestore(&host->lock, flags);
2301 }
2302 
2303 static const struct mmc_host_ops sdhci_ops = {
2304 	.request	= sdhci_request,
2305 	.post_req	= sdhci_post_req,
2306 	.pre_req	= sdhci_pre_req,
2307 	.set_ios	= sdhci_set_ios,
2308 	.get_cd		= sdhci_get_cd,
2309 	.get_ro		= sdhci_get_ro,
2310 	.hw_reset	= sdhci_hw_reset,
2311 	.enable_sdio_irq = sdhci_enable_sdio_irq,
2312 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2313 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2314 	.execute_tuning			= sdhci_execute_tuning,
2315 	.card_event			= sdhci_card_event,
2316 	.card_busy	= sdhci_card_busy,
2317 };
2318 
2319 /*****************************************************************************\
2320  *                                                                           *
2321  * Tasklets                                                                  *
2322  *                                                                           *
2323 \*****************************************************************************/
2324 
2325 static bool sdhci_request_done(struct sdhci_host *host)
2326 {
2327 	unsigned long flags;
2328 	struct mmc_request *mrq;
2329 	int i;
2330 
2331 	spin_lock_irqsave(&host->lock, flags);
2332 
2333 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2334 		mrq = host->mrqs_done[i];
2335 		if (mrq)
2336 			break;
2337 	}
2338 
2339 	if (!mrq) {
2340 		spin_unlock_irqrestore(&host->lock, flags);
2341 		return true;
2342 	}
2343 
2344 	sdhci_del_timer(host, mrq);
2345 
2346 	/*
2347 	 * Always unmap the data buffers if they were mapped by
2348 	 * sdhci_prepare_data() whenever we finish with a request.
2349 	 * This avoids leaking DMA mappings on error.
2350 	 */
2351 	if (host->flags & SDHCI_REQ_USE_DMA) {
2352 		struct mmc_data *data = mrq->data;
2353 
2354 		if (data && data->host_cookie == COOKIE_MAPPED) {
2355 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2356 				     mmc_get_dma_dir(data));
2357 			data->host_cookie = COOKIE_UNMAPPED;
2358 		}
2359 	}
2360 
2361 	/*
2362 	 * The controller needs a reset of internal state machines
2363 	 * upon error conditions.
2364 	 */
2365 	if (sdhci_needs_reset(host, mrq)) {
2366 		/*
2367 		 * Do not finish until command and data lines are available for
2368 		 * reset. Note there can only be one other mrq, so it cannot
2369 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2370 		 * would both be null.
2371 		 */
2372 		if (host->cmd || host->data_cmd) {
2373 			spin_unlock_irqrestore(&host->lock, flags);
2374 			return true;
2375 		}
2376 
2377 		/* Some controllers need this kick or reset won't work here */
2378 		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2379 			/* This is to force an update */
2380 			host->ops->set_clock(host, host->clock);
2381 
2382 		/* Spec says we should do both at the same time, but Ricoh
2383 		   controllers do not like that. */
2384 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2385 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2386 
2387 		host->pending_reset = false;
2388 	}
2389 
2390 	if (!sdhci_has_requests(host))
2391 		sdhci_led_deactivate(host);
2392 
2393 	host->mrqs_done[i] = NULL;
2394 
2395 	mmiowb();
2396 	spin_unlock_irqrestore(&host->lock, flags);
2397 
2398 	mmc_request_done(host->mmc, mrq);
2399 
2400 	return false;
2401 }
2402 
2403 static void sdhci_tasklet_finish(unsigned long param)
2404 {
2405 	struct sdhci_host *host = (struct sdhci_host *)param;
2406 
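	/*
	 * sdhci_request_done() hands at most one completed request back to
	 * the core per call and returns false when it did so; keep calling
	 * until it returns true, i.e. nothing more can be completed now.
	 */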
2407 	while (!sdhci_request_done(host))
2408 		;
2409 }
2410 
2411 static void sdhci_timeout_timer(struct timer_list *t)
2412 {
2413 	struct sdhci_host *host;
2414 	unsigned long flags;
2415 
2416 	host = from_timer(host, t, timer);
2417 
2418 	spin_lock_irqsave(&host->lock, flags);
2419 
2420 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2421 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2422 		       mmc_hostname(host->mmc));
2423 		sdhci_dumpregs(host);
2424 
2425 		host->cmd->error = -ETIMEDOUT;
2426 		sdhci_finish_mrq(host, host->cmd->mrq);
2427 	}
2428 
2429 	mmiowb();
2430 	spin_unlock_irqrestore(&host->lock, flags);
2431 }
2432 
2433 static void sdhci_timeout_data_timer(struct timer_list *t)
2434 {
2435 	struct sdhci_host *host;
2436 	unsigned long flags;
2437 
2438 	host = from_timer(host, t, data_timer);
2439 
2440 	spin_lock_irqsave(&host->lock, flags);
2441 
2442 	if (host->data || host->data_cmd ||
2443 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2444 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2445 		       mmc_hostname(host->mmc));
2446 		sdhci_dumpregs(host);
2447 
2448 		if (host->data) {
2449 			host->data->error = -ETIMEDOUT;
2450 			sdhci_finish_data(host);
2451 		} else if (host->data_cmd) {
2452 			host->data_cmd->error = -ETIMEDOUT;
2453 			sdhci_finish_mrq(host, host->data_cmd->mrq);
2454 		} else {
2455 			host->cmd->error = -ETIMEDOUT;
2456 			sdhci_finish_mrq(host, host->cmd->mrq);
2457 		}
2458 	}
2459 
2460 	mmiowb();
2461 	spin_unlock_irqrestore(&host->lock, flags);
2462 }
2463 
2464 /*****************************************************************************\
2465  *                                                                           *
2466  * Interrupt handling                                                        *
2467  *                                                                           *
2468 \*****************************************************************************/
2469 
2470 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2471 {
2472 	if (!host->cmd) {
2473 		/*
2474 		 * SDHCI recovers from errors by resetting the cmd and data
2475 		 * circuits.  Until that is done, there very well might be more
2476 		 * interrupts, so ignore them in that case.
2477 		 */
2478 		if (host->pending_reset)
2479 			return;
2480 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2481 		       mmc_hostname(host->mmc), (unsigned)intmask);
2482 		sdhci_dumpregs(host);
2483 		return;
2484 	}
2485 
2486 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2487 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2488 		if (intmask & SDHCI_INT_TIMEOUT)
2489 			host->cmd->error = -ETIMEDOUT;
2490 		else
2491 			host->cmd->error = -EILSEQ;
2492 
2493 		/*
2494 		 * If this command initiates a data phase and a response
2495 		 * CRC error is signalled, the card can start transferring
2496 		 * data - the card may have received the command without
2497 		 * error.  We must not terminate the mmc_request early.
2498 		 *
2499 		 * If the card did not receive the command or returned an
2500 		 * error which prevented it sending data, the data phase
2501 		 * will time out.
2502 		 */
2503 		if (host->cmd->data &&
2504 		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2505 		     SDHCI_INT_CRC) {
2506 			host->cmd = NULL;
2507 			return;
2508 		}
2509 
2510 		sdhci_finish_mrq(host, host->cmd->mrq);
2511 		return;
2512 	}
2513 
2514 	if (intmask & SDHCI_INT_RESPONSE)
2515 		sdhci_finish_command(host);
2516 }
2517 
2518 static void sdhci_adma_show_error(struct sdhci_host *host)
2519 {
2520 	void *desc = host->adma_table;
2521 
2522 	sdhci_dumpregs(host);
2523 
2524 	while (true) {
2525 		struct sdhci_adma2_64_desc *dma_desc = desc;
2526 
2527 		if (host->flags & SDHCI_USE_64_BIT_DMA)
2528 			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2529 			    desc, le32_to_cpu(dma_desc->addr_hi),
2530 			    le32_to_cpu(dma_desc->addr_lo),
2531 			    le16_to_cpu(dma_desc->len),
2532 			    le16_to_cpu(dma_desc->cmd));
2533 		else
2534 			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2535 			    desc, le32_to_cpu(dma_desc->addr_lo),
2536 			    le16_to_cpu(dma_desc->len),
2537 			    le16_to_cpu(dma_desc->cmd));
2538 
2539 		desc += host->desc_sz;
2540 
2541 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2542 			break;
2543 	}
2544 }
2545 
2546 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2547 {
2548 	u32 command;
2549 
2550 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2551 	if (intmask & SDHCI_INT_DATA_AVAIL) {
2552 		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2553 		if (command == MMC_SEND_TUNING_BLOCK ||
2554 		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2555 			host->tuning_done = 1;
2556 			wake_up(&host->buf_ready_int);
2557 			return;
2558 		}
2559 	}
2560 
2561 	if (!host->data) {
2562 		struct mmc_command *data_cmd = host->data_cmd;
2563 
2564 		/*
2565 		 * The "data complete" interrupt is also used to
2566 		 * indicate that a busy state has ended. See comment
2567 		 * above in sdhci_cmd_irq().
2568 		 */
2569 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2570 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2571 				host->data_cmd = NULL;
2572 				data_cmd->error = -ETIMEDOUT;
2573 				sdhci_finish_mrq(host, data_cmd->mrq);
2574 				return;
2575 			}
2576 			if (intmask & SDHCI_INT_DATA_END) {
2577 				host->data_cmd = NULL;
2578 				/*
2579 				 * Some cards handle busy-end interrupt
2580 				 * before the command has completed, so make
2581 				 * sure we do things in the proper order.
2582 				 */
2583 				if (host->cmd == data_cmd)
2584 					return;
2585 
2586 				sdhci_finish_mrq(host, data_cmd->mrq);
2587 				return;
2588 			}
2589 		}
2590 
2591 		/*
2592 		 * SDHCI recovers from errors by resetting the cmd and data
2593 		 * circuits. Until that is done, there very well might be more
2594 		 * interrupts, so ignore them in that case.
2595 		 */
2596 		if (host->pending_reset)
2597 			return;
2598 
2599 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2600 		       mmc_hostname(host->mmc), (unsigned)intmask);
2601 		sdhci_dumpregs(host);
2602 
2603 		return;
2604 	}
2605 
2606 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2607 		host->data->error = -ETIMEDOUT;
2608 	else if (intmask & SDHCI_INT_DATA_END_BIT)
2609 		host->data->error = -EILSEQ;
2610 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2611 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2612 			!= MMC_BUS_TEST_R)
2613 		host->data->error = -EILSEQ;
2614 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2615 		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2616 		sdhci_adma_show_error(host);
2617 		host->data->error = -EIO;
2618 		if (host->ops->adma_workaround)
2619 			host->ops->adma_workaround(host, intmask);
2620 	}
2621 
2622 	if (host->data->error)
2623 		sdhci_finish_data(host);
2624 	else {
2625 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2626 			sdhci_transfer_pio(host);
2627 
2628 		/*
2629 		 * We currently don't do anything fancy with DMA
2630 		 * boundaries, but as we can't disable the feature
2631 		 * we need to at least restart the transfer.
2632 		 *
2633 		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2634 		 * should return a valid address to continue from, but as
2635 		 * some controllers are faulty, don't trust them.
2636 		 */
2637 		if (intmask & SDHCI_INT_DMA_END) {
2638 			u32 dmastart, dmanow;
2639 			dmastart = sg_dma_address(host->data->sg);
2640 			dmanow = dmastart + host->data->bytes_xfered;
2641 			/*
2642 			 * Force update to the next DMA block boundary.
2643 			 */
2644 			dmanow = (dmanow &
2645 				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2646 				SDHCI_DEFAULT_BOUNDARY_SIZE;
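			/*
			 * Worked example, assuming the default 512KiB
			 * boundary: dmastart = 0x10000000 with 0x64000
			 * bytes transferred gives dmanow = 0x10064000,
			 * which rounds up to 0x10080000, the boundary at
			 * which the transfer is restarted below.
			 */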
2647 			host->data->bytes_xfered = dmanow - dmastart;
2648 			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2649 			    dmastart, host->data->bytes_xfered, dmanow);
2650 			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2651 		}
2652 
2653 		if (intmask & SDHCI_INT_DATA_END) {
2654 			if (host->cmd == host->data_cmd) {
2655 				/*
2656 				 * Data managed to finish before the
2657 				 * command completed. Make sure we do
2658 				 * things in the proper order.
2659 				 */
2660 				host->data_early = 1;
2661 			} else {
2662 				sdhci_finish_data(host);
2663 			}
2664 		}
2665 	}
2666 }
2667 
2668 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2669 {
2670 	irqreturn_t result = IRQ_NONE;
2671 	struct sdhci_host *host = dev_id;
2672 	u32 intmask, mask, unexpected = 0;
2673 	int max_loops = 16;
2674 
2675 	spin_lock(&host->lock);
2676 
2677 	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2678 		spin_unlock(&host->lock);
2679 		return IRQ_NONE;
2680 	}
2681 
2682 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2683 	if (!intmask || intmask == 0xffffffff) {
2684 		result = IRQ_NONE;
2685 		goto out;
2686 	}
2687 
2688 	do {
2689 		DBG("IRQ status 0x%08x\n", intmask);
2690 
2691 		if (host->ops->irq) {
2692 			intmask = host->ops->irq(host, intmask);
2693 			if (!intmask)
2694 				goto cont;
2695 		}
2696 
2697 		/* Clear selected interrupts. */
2698 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2699 				  SDHCI_INT_BUS_POWER);
2700 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2701 
2702 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2703 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2704 				      SDHCI_CARD_PRESENT;
2705 
2706 			/*
2707 			 * As observed on i.MX eSDHC, the INSERT bit is
2708 			 * immediately set again as soon as it is cleared
2709 			 * while a card is inserted, so the irq has to be
2710 			 * masked to prevent an interrupt storm that would
2711 			 * freeze the system. The REMOVE bit behaves the
2712 			 * same way.
2713 			 *
2714 			 * More testing is needed here to ensure this works
2715 			 * for other platforms though.
2716 			 */
2717 			host->ier &= ~(SDHCI_INT_CARD_INSERT |
2718 				       SDHCI_INT_CARD_REMOVE);
2719 			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2720 					       SDHCI_INT_CARD_INSERT;
2721 			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2722 			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2723 
2724 			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2725 				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2726 
2727 			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2728 						       SDHCI_INT_CARD_REMOVE);
2729 			result = IRQ_WAKE_THREAD;
2730 		}
2731 
2732 		if (intmask & SDHCI_INT_CMD_MASK)
2733 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2734 
2735 		if (intmask & SDHCI_INT_DATA_MASK)
2736 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2737 
2738 		if (intmask & SDHCI_INT_BUS_POWER)
2739 			pr_err("%s: Card is consuming too much power!\n",
2740 				mmc_hostname(host->mmc));
2741 
2742 		if (intmask & SDHCI_INT_RETUNE)
2743 			mmc_retune_needed(host->mmc);
2744 
2745 		if ((intmask & SDHCI_INT_CARD_INT) &&
2746 		    (host->ier & SDHCI_INT_CARD_INT)) {
2747 			sdhci_enable_sdio_irq_nolock(host, false);
2748 			host->thread_isr |= SDHCI_INT_CARD_INT;
2749 			result = IRQ_WAKE_THREAD;
2750 		}
2751 
2752 		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2753 			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2754 			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2755 			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2756 
2757 		if (intmask) {
2758 			unexpected |= intmask;
2759 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2760 		}
2761 cont:
2762 		if (result == IRQ_NONE)
2763 			result = IRQ_HANDLED;
2764 
2765 		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2766 	} while (intmask && --max_loops);
2767 out:
2768 	spin_unlock(&host->lock);
2769 
2770 	if (unexpected) {
2771 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2772 			   mmc_hostname(host->mmc), unexpected);
2773 		sdhci_dumpregs(host);
2774 	}
2775 
2776 	return result;
2777 }
2778 
2779 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2780 {
2781 	struct sdhci_host *host = dev_id;
2782 	unsigned long flags;
2783 	u32 isr;
2784 
2785 	spin_lock_irqsave(&host->lock, flags);
2786 	isr = host->thread_isr;
2787 	host->thread_isr = 0;
2788 	spin_unlock_irqrestore(&host->lock, flags);
2789 
2790 	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2791 		struct mmc_host *mmc = host->mmc;
2792 
2793 		mmc->ops->card_event(mmc);
2794 		mmc_detect_change(mmc, msecs_to_jiffies(200));
2795 	}
2796 
2797 	if (isr & SDHCI_INT_CARD_INT) {
2798 		sdio_run_irqs(host->mmc);
2799 
2800 		spin_lock_irqsave(&host->lock, flags);
2801 		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2802 			sdhci_enable_sdio_irq_nolock(host, true);
2803 		spin_unlock_irqrestore(&host->lock, flags);
2804 	}
2805 
2806 	return isr ? IRQ_HANDLED : IRQ_NONE;
2807 }
2808 
2809 /*****************************************************************************\
2810  *                                                                           *
2811  * Suspend/resume                                                            *
2812  *                                                                           *
2813 \*****************************************************************************/
2814 
2815 #ifdef CONFIG_PM
2816 /*
2817  * To enable wakeup events, the corresponding events have to be enabled in
2818  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2819  * Table' in the SD Host Controller Standard Specification.
2820  * It is useless to restore SDHCI_INT_ENABLE state in
2821  * sdhci_disable_irq_wakeups() since it will be set by
2822  * sdhci_enable_card_detection() or sdhci_init().
2823  */
2824 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2825 {
2826 	u8 val;
2827 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2828 			| SDHCI_WAKE_ON_INT;
2829 	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2830 		      SDHCI_INT_CARD_INT;
2831 
2832 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2833 	val |= mask;
2834 	/* Avoid spurious wakeups */
2835 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
2836 		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2837 		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2838 	}
2839 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2840 	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
2841 }
2842 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2843 
2844 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2845 {
2846 	u8 val;
2847 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2848 			| SDHCI_WAKE_ON_INT;
2849 
2850 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2851 	val &= ~mask;
2852 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2853 }
2854 
2855 int sdhci_suspend_host(struct sdhci_host *host)
2856 {
2857 	sdhci_disable_card_detection(host);
2858 
2859 	mmc_retune_timer_stop(host->mmc);
2860 
2861 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2862 		host->ier = 0;
2863 		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2864 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2865 		free_irq(host->irq, host);
2866 	} else {
2867 		sdhci_enable_irq_wakeups(host);
2868 		enable_irq_wake(host->irq);
2869 	}
2870 	return 0;
2871 }
2872 
2873 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2874 
2875 int sdhci_resume_host(struct sdhci_host *host)
2876 {
2877 	struct mmc_host *mmc = host->mmc;
2878 	int ret = 0;
2879 
2880 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2881 		if (host->ops->enable_dma)
2882 			host->ops->enable_dma(host);
2883 	}
2884 
2885 	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2886 	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2887 		/* Card keeps power but host controller does not */
2888 		sdhci_init(host, 0);
2889 		host->pwr = 0;
2890 		host->clock = 0;
2891 		mmc->ops->set_ios(mmc, &mmc->ios);
2892 	} else {
2893 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2894 		mmiowb();
2895 	}
2896 
2897 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2898 		ret = request_threaded_irq(host->irq, sdhci_irq,
2899 					   sdhci_thread_irq, IRQF_SHARED,
2900 					   mmc_hostname(host->mmc), host);
2901 		if (ret)
2902 			return ret;
2903 	} else {
2904 		sdhci_disable_irq_wakeups(host);
2905 		disable_irq_wake(host->irq);
2906 	}
2907 
2908 	sdhci_enable_card_detection(host);
2909 
2910 	return ret;
2911 }
2912 
2913 EXPORT_SYMBOL_GPL(sdhci_resume_host);
2914 
2915 int sdhci_runtime_suspend_host(struct sdhci_host *host)
2916 {
2917 	unsigned long flags;
2918 
2919 	mmc_retune_timer_stop(host->mmc);
2920 
2921 	spin_lock_irqsave(&host->lock, flags);
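	/*
	 * Keep only the SDIO card interrupt enabled so that, when SDIO
	 * IRQs are in use, the card can still interrupt the runtime
	 * suspended host; every other interrupt source stays masked until
	 * resume.
	 */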
2922 	host->ier &= SDHCI_INT_CARD_INT;
2923 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2924 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2925 	spin_unlock_irqrestore(&host->lock, flags);
2926 
2927 	synchronize_hardirq(host->irq);
2928 
2929 	spin_lock_irqsave(&host->lock, flags);
2930 	host->runtime_suspended = true;
2931 	spin_unlock_irqrestore(&host->lock, flags);
2932 
2933 	return 0;
2934 }
2935 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2936 
2937 int sdhci_runtime_resume_host(struct sdhci_host *host)
2938 {
2939 	struct mmc_host *mmc = host->mmc;
2940 	unsigned long flags;
2941 	int host_flags = host->flags;
2942 
2943 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2944 		if (host->ops->enable_dma)
2945 			host->ops->enable_dma(host);
2946 	}
2947 
2948 	sdhci_init(host, 0);
2949 
2950 	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
2951 	    mmc->ios.power_mode != MMC_POWER_OFF) {
2952 		/* Force clock and power re-program */
2953 		host->pwr = 0;
2954 		host->clock = 0;
2955 		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
2956 		mmc->ops->set_ios(mmc, &mmc->ios);
2957 
2958 		if ((host_flags & SDHCI_PV_ENABLED) &&
2959 		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2960 			spin_lock_irqsave(&host->lock, flags);
2961 			sdhci_enable_preset_value(host, true);
2962 			spin_unlock_irqrestore(&host->lock, flags);
2963 		}
2964 
2965 		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
2966 		    mmc->ops->hs400_enhanced_strobe)
2967 			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
2968 	}
2969 
2970 	spin_lock_irqsave(&host->lock, flags);
2971 
2972 	host->runtime_suspended = false;
2973 
2974 	/* Enable SDIO IRQ */
2975 	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2976 		sdhci_enable_sdio_irq_nolock(host, true);
2977 
2978 	/* Enable Card Detection */
2979 	sdhci_enable_card_detection(host);
2980 
2981 	spin_unlock_irqrestore(&host->lock, flags);
2982 
2983 	return 0;
2984 }
2985 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2986 
2987 #endif /* CONFIG_PM */
2988 
2989 /*****************************************************************************\
2990  *                                                                           *
2991  * Command Queue Engine (CQE) helpers                                        *
2992  *                                                                           *
2993 \*****************************************************************************/
2994 
2995 void sdhci_cqe_enable(struct mmc_host *mmc)
2996 {
2997 	struct sdhci_host *host = mmc_priv(mmc);
2998 	unsigned long flags;
2999 	u8 ctrl;
3000 
3001 	spin_lock_irqsave(&host->lock, flags);
3002 
3003 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3004 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3005 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3006 		ctrl |= SDHCI_CTRL_ADMA64;
3007 	else
3008 		ctrl |= SDHCI_CTRL_ADMA32;
3009 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3010 
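	/*
	 * SDHCI_MAKE_BLKSZ() packs the SDMA buffer boundary into bits 14:12
	 * and the block size into bits 11:0 of the Block Size register;
	 * e.g. the default boundary argument 7 (512KiB) with 512-byte
	 * blocks encodes as 0x7200.
	 */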
3011 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3012 		     SDHCI_BLOCK_SIZE);
3013 
3014 	/* Set maximum timeout */
3015 	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3016 
3017 	host->ier = host->cqe_ier;
3018 
3019 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3020 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3021 
3022 	host->cqe_on = true;
3023 
3024 	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3025 		 mmc_hostname(mmc), host->ier,
3026 		 sdhci_readl(host, SDHCI_INT_STATUS));
3027 
3028 	mmiowb();
3029 	spin_unlock_irqrestore(&host->lock, flags);
3030 }
3031 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3032 
3033 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3034 {
3035 	struct sdhci_host *host = mmc_priv(mmc);
3036 	unsigned long flags;
3037 
3038 	spin_lock_irqsave(&host->lock, flags);
3039 
3040 	sdhci_set_default_irqs(host);
3041 
3042 	host->cqe_on = false;
3043 
3044 	if (recovery) {
3045 		sdhci_do_reset(host, SDHCI_RESET_CMD);
3046 		sdhci_do_reset(host, SDHCI_RESET_DATA);
3047 	}
3048 
3049 	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3050 		 mmc_hostname(mmc), host->ier,
3051 		 sdhci_readl(host, SDHCI_INT_STATUS));
3052 
3053 	mmiowb();
3054 	spin_unlock_irqrestore(&host->lock, flags);
3055 }
3056 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3057 
3058 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3059 		   int *data_error)
3060 {
3061 	u32 mask;
3062 
3063 	if (!host->cqe_on)
3064 		return false;
3065 
3066 	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3067 		*cmd_error = -EILSEQ;
3068 	else if (intmask & SDHCI_INT_TIMEOUT)
3069 		*cmd_error = -ETIMEDOUT;
3070 	else
3071 		*cmd_error = 0;
3072 
3073 	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3074 		*data_error = -EILSEQ;
3075 	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3076 		*data_error = -ETIMEDOUT;
3077 	else if (intmask & SDHCI_INT_ADMA_ERROR)
3078 		*data_error = -EIO;
3079 	else
3080 		*data_error = 0;
3081 
3082 	/* Clear selected interrupts. */
3083 	mask = intmask & host->cqe_ier;
3084 	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3085 
3086 	if (intmask & SDHCI_INT_BUS_POWER)
3087 		pr_err("%s: Card is consuming too much power!\n",
3088 		       mmc_hostname(host->mmc));
3089 
3090 	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3091 	if (intmask) {
3092 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3093 		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3094 		       mmc_hostname(host->mmc), intmask);
3095 		sdhci_dumpregs(host);
3096 	}
3097 
3098 	return true;
3099 }
3100 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3101 
3102 /*****************************************************************************\
3103  *                                                                           *
3104  * Device allocation/registration                                            *
3105  *                                                                           *
3106 \*****************************************************************************/
3107 
3108 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3109 	size_t priv_size)
3110 {
3111 	struct mmc_host *mmc;
3112 	struct sdhci_host *host;
3113 
3114 	WARN_ON(dev == NULL);
3115 
3116 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3117 	if (!mmc)
3118 		return ERR_PTR(-ENOMEM);
3119 
3120 	host = mmc_priv(mmc);
3121 	host->mmc = mmc;
3122 	host->mmc_host_ops = sdhci_ops;
3123 	mmc->ops = &host->mmc_host_ops;
3124 
3125 	host->flags = SDHCI_SIGNALING_330;
3126 
3127 	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3128 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3129 
3130 	host->tuning_delay = -1;
3131 
3132 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3133 
3134 	return host;
3135 }
3136 
3137 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3138 
3139 static int sdhci_set_dma_mask(struct sdhci_host *host)
3140 {
3141 	struct mmc_host *mmc = host->mmc;
3142 	struct device *dev = mmc_dev(mmc);
3143 	int ret = -EINVAL;
3144 
3145 	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3146 		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3147 
3148 	/* Try 64-bit mask if hardware is capable of it */
3149 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3150 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3151 		if (ret) {
3152 			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3153 				mmc_hostname(mmc));
3154 			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3155 		}
3156 	}
3157 
3158 	/* 32-bit mask as default & fallback */
3159 	if (ret) {
3160 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3161 		if (ret)
3162 			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3163 				mmc_hostname(mmc));
3164 	}
3165 
3166 	return ret;
3167 }
3168 
3169 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3170 {
3171 	u16 v;
3172 	u64 dt_caps_mask = 0;
3173 	u64 dt_caps = 0;
3174 
3175 	if (host->read_caps)
3176 		return;
3177 
3178 	host->read_caps = true;
3179 
3180 	if (debug_quirks)
3181 		host->quirks = debug_quirks;
3182 
3183 	if (debug_quirks2)
3184 		host->quirks2 = debug_quirks2;
3185 
3186 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3187 
3188 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3189 			     "sdhci-caps-mask", &dt_caps_mask);
3190 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3191 			     "sdhci-caps", &dt_caps);
3192 
3193 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3194 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3195 
3196 	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3197 		return;
3198 
3199 	if (caps) {
3200 		host->caps = *caps;
3201 	} else {
3202 		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3203 		host->caps &= ~lower_32_bits(dt_caps_mask);
3204 		host->caps |= lower_32_bits(dt_caps);
3205 	}
3206 
3207 	if (host->version < SDHCI_SPEC_300)
3208 		return;
3209 
3210 	if (caps1) {
3211 		host->caps1 = *caps1;
3212 	} else {
3213 		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3214 		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3215 		host->caps1 |= upper_32_bits(dt_caps);
3216 	}
3217 }
3218 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
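/*
 * Illustrative device-tree fragment (an example, not taken from this file):
 * assuming SDHCI_CAN_DO_ADMA2 is bit 19 of SDHCI_CAPABILITIES, a board
 * could hide broken ADMA from the driver with
 *
 *	sdhci-caps-mask = <0x00000000 0x00080000>;
 *
 * since the upper 32 bits of the property are applied to
 * SDHCI_CAPABILITIES_1 and the lower 32 bits to SDHCI_CAPABILITIES above.
 */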
3219 
3220 int sdhci_setup_host(struct sdhci_host *host)
3221 {
3222 	struct mmc_host *mmc;
3223 	u32 max_current_caps;
3224 	unsigned int ocr_avail;
3225 	unsigned int override_timeout_clk;
3226 	u32 max_clk;
3227 	int ret;
3228 
3229 	WARN_ON(host == NULL);
3230 	if (host == NULL)
3231 		return -EINVAL;
3232 
3233 	mmc = host->mmc;
3234 
3235 	/*
3236 	 * If there are external regulators, get them. Note this must be done
3237 	 * early before resetting the host and reading the capabilities so that
3238 	 * the host can take the appropriate action if regulators are not
3239 	 * available.
3240 	 */
3241 	ret = mmc_regulator_get_supply(mmc);
3242 	if (ret)
3243 		return ret;
3244 
3245 	DBG("Version:   0x%08x | Present:  0x%08x\n",
3246 	    sdhci_readw(host, SDHCI_HOST_VERSION),
3247 	    sdhci_readl(host, SDHCI_PRESENT_STATE));
3248 	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3249 	    sdhci_readl(host, SDHCI_CAPABILITIES),
3250 	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
3251 
3252 	sdhci_read_caps(host);
3253 
3254 	override_timeout_clk = host->timeout_clk;
3255 
3256 	if (host->version > SDHCI_SPEC_300) {
3257 		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3258 		       mmc_hostname(mmc), host->version);
3259 	}
3260 
3261 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3262 		host->flags |= SDHCI_USE_SDMA;
3263 	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3264 		DBG("Controller doesn't have SDMA capability\n");
3265 	else
3266 		host->flags |= SDHCI_USE_SDMA;
3267 
3268 	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3269 		(host->flags & SDHCI_USE_SDMA)) {
3270 		DBG("Disabling DMA as it is marked broken\n");
3271 		host->flags &= ~SDHCI_USE_SDMA;
3272 	}
3273 
3274 	if ((host->version >= SDHCI_SPEC_200) &&
3275 		(host->caps & SDHCI_CAN_DO_ADMA2))
3276 		host->flags |= SDHCI_USE_ADMA;
3277 
3278 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3279 		(host->flags & SDHCI_USE_ADMA)) {
3280 		DBG("Disabling ADMA as it is marked broken\n");
3281 		host->flags &= ~SDHCI_USE_ADMA;
3282 	}
3283 
3284 	/*
3285 	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3286 	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
3287 	 * that during the first call to ->enable_dma().  Similarly
3288 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3289 	 * implement.
3290 	 */
3291 	if (host->caps & SDHCI_CAN_64BIT)
3292 		host->flags |= SDHCI_USE_64_BIT_DMA;
3293 
3294 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3295 		ret = sdhci_set_dma_mask(host);
3296 
3297 		if (!ret && host->ops->enable_dma)
3298 			ret = host->ops->enable_dma(host);
3299 
3300 		if (ret) {
3301 			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3302 				mmc_hostname(mmc));
3303 			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3304 
3305 			ret = 0;
3306 		}
3307 	}
3308 
3309 	/* SDMA does not support 64-bit DMA */
3310 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3311 		host->flags &= ~SDHCI_USE_SDMA;
3312 
3313 	if (host->flags & SDHCI_USE_ADMA) {
3314 		dma_addr_t dma;
3315 		void *buf;
3316 
3317 		/*
3318 		 * The DMA descriptor table size is calculated as the maximum
3319 		 * number of segments times 2, to allow for an alignment
3320 		 * descriptor for each segment, plus 1 for a nop end descriptor,
3321 		 * all multiplied by the descriptor size.
3322 		 */
3323 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
3324 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3325 					      SDHCI_ADMA2_64_DESC_SZ;
3326 			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3327 		} else {
3328 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3329 					      SDHCI_ADMA2_32_DESC_SZ;
3330 			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3331 		}
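		/*
		 * For example, with SDHCI_MAX_SEGS at its usual value of
		 * 128, the table holds 128 * 2 + 1 = 257 descriptors, i.e.
		 * 257 * 12 bytes in the 64-bit layout or 257 * 8 bytes in
		 * the 32-bit one.
		 */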
3332 
3333 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3334 		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3335 					 host->adma_table_sz, &dma, GFP_KERNEL);
3336 		if (!buf) {
3337 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3338 				mmc_hostname(mmc));
3339 			host->flags &= ~SDHCI_USE_ADMA;
3340 		} else if ((dma + host->align_buffer_sz) &
3341 			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3342 			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3343 				mmc_hostname(mmc));
3344 			host->flags &= ~SDHCI_USE_ADMA;
3345 			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3346 					  host->adma_table_sz, buf, dma);
3347 		} else {
3348 			host->align_buffer = buf;
3349 			host->align_addr = dma;
3350 
3351 			host->adma_table = buf + host->align_buffer_sz;
3352 			host->adma_addr = dma + host->align_buffer_sz;
3353 		}
3354 	}
3355 
3356 	/*
3357 	 * If we use DMA, then it's up to the caller to set the DMA
3358 	 * mask, but PIO does not need the hw shim so we set a new
3359 	 * mask here in that case.
3360 	 */
3361 	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3362 		host->dma_mask = DMA_BIT_MASK(64);
3363 		mmc_dev(mmc)->dma_mask = &host->dma_mask;
3364 	}
3365 
3366 	if (host->version >= SDHCI_SPEC_300)
3367 		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3368 			>> SDHCI_CLOCK_BASE_SHIFT;
3369 	else
3370 		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3371 			>> SDHCI_CLOCK_BASE_SHIFT;
3372 
3373 	host->max_clk *= 1000000;
3374 	if (host->max_clk == 0 || host->quirks &
3375 			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3376 		if (!host->ops->get_max_clock) {
3377 			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3378 			       mmc_hostname(mmc));
3379 			ret = -ENODEV;
3380 			goto undma;
3381 		}
3382 		host->max_clk = host->ops->get_max_clock(host);
3383 	}
3384 
3385 	/*
3386 	 * In case of Host Controller v3.00, find out whether clock
3387 	 * multiplier is supported.
3388 	 */
3389 	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3390 			SDHCI_CLOCK_MUL_SHIFT;
3391 
3392 	/*
3393 	 * In case the value in Clock Multiplier is 0, then programmable
3394 	 * clock mode is not supported, otherwise the actual clock
3395 	 * multiplier is one more than the value of Clock Multiplier
3396 	 * in the Capabilities Register.
3397 	 */
3398 	if (host->clk_mul)
3399 		host->clk_mul += 1;
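	/*
	 * E.g. a Clock Multiplier field of 15 yields clk_mul = 16; with a
	 * 200MHz base clock, f_min below works out to (200MHz * 16) / 1024,
	 * roughly 3.1MHz, in programmable clock mode.
	 */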
3400 
3401 	/*
3402 	 * Set host parameters.
3403 	 */
3404 	max_clk = host->max_clk;
3405 
3406 	if (host->ops->get_min_clock)
3407 		mmc->f_min = host->ops->get_min_clock(host);
3408 	else if (host->version >= SDHCI_SPEC_300) {
3409 		if (host->clk_mul) {
3410 			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3411 			max_clk = host->max_clk * host->clk_mul;
3412 		} else
3413 			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3414 	} else
3415 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3416 
3417 	if (!mmc->f_max || mmc->f_max > max_clk)
3418 		mmc->f_max = max_clk;
3419 
3420 	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3421 		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3422 					SDHCI_TIMEOUT_CLK_SHIFT;
3423 
3424 		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3425 			host->timeout_clk *= 1000;
3426 
3427 		if (host->timeout_clk == 0) {
3428 			if (!host->ops->get_timeout_clock) {
3429 				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3430 					mmc_hostname(mmc));
3431 				ret = -ENODEV;
3432 				goto undma;
3433 			}
3434 
3435 			host->timeout_clk =
3436 				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3437 					     1000);
3438 		}
3439 
3440 		if (override_timeout_clk)
3441 			host->timeout_clk = override_timeout_clk;
3442 
3443 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3444 			host->ops->get_max_timeout_count(host) : 1 << 27;
3445 		mmc->max_busy_timeout /= host->timeout_clk;
3446 	}
3447 
3448 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3449 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3450 
3451 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3452 		host->flags |= SDHCI_AUTO_CMD12;
3453 
3454 	/* Auto-CMD23 stuff only works in ADMA or PIO. */
3455 	if ((host->version >= SDHCI_SPEC_300) &&
3456 	    ((host->flags & SDHCI_USE_ADMA) ||
3457 	     !(host->flags & SDHCI_USE_SDMA)) &&
3458 	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3459 		host->flags |= SDHCI_AUTO_CMD23;
3460 		DBG("Auto-CMD23 available\n");
3461 	} else {
3462 		DBG("Auto-CMD23 unavailable\n");
3463 	}
3464 
3465 	/*
3466 	 * A controller may support 8-bit width, but the board itself
3467 	 * might not have the pins brought out.  Boards that support
3468 	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3469 	 * their platform code before calling sdhci_add_host(), and we
3470 	 * won't assume 8-bit width for hosts without that CAP.
3471 	 */
3472 	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3473 		mmc->caps |= MMC_CAP_4_BIT_DATA;
3474 
3475 	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3476 		mmc->caps &= ~MMC_CAP_CMD23;
3477 
3478 	if (host->caps & SDHCI_CAN_DO_HISPD)
3479 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3480 
3481 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3482 	    mmc_card_is_removable(mmc) &&
3483 	    mmc_gpio_get_cd(host->mmc) < 0)
3484 		mmc->caps |= MMC_CAP_NEEDS_POLL;
3485 
3486 	/* If the vqmmc regulator cannot supply 1.8V signalling, there is no UHS */
3487 	if (!IS_ERR(mmc->supply.vqmmc)) {
3488 		ret = regulator_enable(mmc->supply.vqmmc);
3489 		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3490 						    1950000))
3491 			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3492 					 SDHCI_SUPPORT_SDR50 |
3493 					 SDHCI_SUPPORT_DDR50);
3494 		if (ret) {
3495 			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3496 				mmc_hostname(mmc), ret);
3497 			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3498 		}
3499 	}
3500 
3501 	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3502 		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3503 				 SDHCI_SUPPORT_DDR50);
3504 	}
3505 
3506 	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3507 	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3508 			   SDHCI_SUPPORT_DDR50))
3509 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3510 
3511 	/* SDR104 support also implies SDR50 support */
3512 	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3513 		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3514 		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
3515 		 * field can be promoted to support HS200.
3516 		 */
3517 		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3518 			mmc->caps2 |= MMC_CAP2_HS200;
3519 	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3520 		mmc->caps |= MMC_CAP_UHS_SDR50;
3521 	}
3522 
3523 	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3524 	    (host->caps1 & SDHCI_SUPPORT_HS400))
3525 		mmc->caps2 |= MMC_CAP2_HS400;
3526 
3527 	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3528 	    (IS_ERR(mmc->supply.vqmmc) ||
3529 	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3530 					     1300000)))
3531 		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3532 
3533 	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3534 	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3535 		mmc->caps |= MMC_CAP_UHS_DDR50;
3536 
3537 	/* Does the host need tuning for SDR50? */
3538 	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3539 		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3540 
3541 	/* Driver Type(s) (A, C, D) supported by the host */
3542 	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3543 		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3544 	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3545 		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3546 	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3547 		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3548 
3549 	/* Initial value for re-tuning timer count */
3550 	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3551 			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3552 
3553 	/*
3554 	 * In case Re-tuning Timer is not disabled, the actual value of
3555 	 * re-tuning timer will be 2 ^ (n - 1).
3556 	 */
3557 	if (host->tuning_count)
3558 		host->tuning_count = 1 << (host->tuning_count - 1);
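	/*
	 * E.g. a raw Re-tuning Timer Count of 4 gives a re-tuning period
	 * of 2 ^ (4 - 1) = 8 seconds.
	 */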
3559 
3560 	/* Re-tuning mode supported by the Host Controller */
3561 	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3562 			     SDHCI_RETUNING_MODE_SHIFT;
3563 
3564 	ocr_avail = 0;
3565 
3566 	/*
3567 	 * According to SD Host Controller spec v3.00, if the Host System
3568 	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3569 	 * the value is meaningful only if Voltage Support in the Capabilities
3570 	 * register is set. The actual current value is 4 times the register
3571 	 * value.
3572 	 */
3573 	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3574 	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3575 		int curr = regulator_get_current_limit(mmc->supply.vmmc);
3576 		if (curr > 0) {
3577 
3578 			/* convert to SDHCI_MAX_CURRENT format */
3579 			curr = curr/1000;  /* convert to mA */
3580 			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3581 
3582 			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3583 			max_current_caps =
3584 				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3585 				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3586 				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
3587 		}
3588 	}
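	/*
	 * E.g. a 600000uA regulator limit becomes 600mA, i.e. 150 in the
	 * register's 4mA units, replicated into the 3.3V, 3.0V and 1.8V
	 * fields above.
	 */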
3589 
3590 	if (host->caps & SDHCI_CAN_VDD_330) {
3591 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3592 
3593 		mmc->max_current_330 = ((max_current_caps &
3594 				   SDHCI_MAX_CURRENT_330_MASK) >>
3595 				   SDHCI_MAX_CURRENT_330_SHIFT) *
3596 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3597 	}
3598 	if (host->caps & SDHCI_CAN_VDD_300) {
3599 		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3600 
3601 		mmc->max_current_300 = ((max_current_caps &
3602 				   SDHCI_MAX_CURRENT_300_MASK) >>
3603 				   SDHCI_MAX_CURRENT_300_SHIFT) *
3604 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3605 	}
3606 	if (host->caps & SDHCI_CAN_VDD_180) {
3607 		ocr_avail |= MMC_VDD_165_195;
3608 
3609 		mmc->max_current_180 = ((max_current_caps &
3610 				   SDHCI_MAX_CURRENT_180_MASK) >>
3611 				   SDHCI_MAX_CURRENT_180_SHIFT) *
3612 				   SDHCI_MAX_CURRENT_MULTIPLIER;
3613 	}
3614 
3615 	/* If OCR set by host, use it instead. */
3616 	if (host->ocr_mask)
3617 		ocr_avail = host->ocr_mask;
3618 
3619 	/* If OCR set by external regulators, give it highest prio. */
3620 	if (mmc->ocr_avail)
3621 		ocr_avail = mmc->ocr_avail;
3622 
3623 	mmc->ocr_avail = ocr_avail;
3624 	mmc->ocr_avail_sdio = ocr_avail;
3625 	if (host->ocr_avail_sdio)
3626 		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3627 	mmc->ocr_avail_sd = ocr_avail;
3628 	if (host->ocr_avail_sd)
3629 		mmc->ocr_avail_sd &= host->ocr_avail_sd;
3630 	else /* normal SD controllers don't support 1.8V */
3631 		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3632 	mmc->ocr_avail_mmc = ocr_avail;
3633 	if (host->ocr_avail_mmc)
3634 		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3635 
3636 	if (mmc->ocr_avail == 0) {
3637 		pr_err("%s: Hardware doesn't report any supported voltages.\n",
3638 		       mmc_hostname(mmc));
3639 		ret = -ENODEV;
3640 		goto unreg;
3641 	}
3642 
3643 	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3644 			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3645 			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3646 	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3647 		host->flags |= SDHCI_SIGNALING_180;
3648 
3649 	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3650 		host->flags |= SDHCI_SIGNALING_120;
3651 
3652 	spin_lock_init(&host->lock);
3653 
3654 	/*
3655 	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3656 	 * size (512KiB). Note that some tuning modes impose a 4MiB limit,
3657 	 * but 512KiB is the smaller of the two anyway.
3658 	 */
3659 	mmc->max_req_size = 524288;
3660 
3661 	/*
3662 	 * Maximum number of segments. Depends on whether the hardware
3663 	 * can do scatter/gather.
3664 	 */
3665 	if (host->flags & SDHCI_USE_ADMA) {
3666 		mmc->max_segs = SDHCI_MAX_SEGS;
3667 	} else if (host->flags & SDHCI_USE_SDMA) {
3668 		mmc->max_segs = 1;
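		/*
		 * With SDMA the whole request is a single contiguous DMA
		 * mapping, so it must also fit within one swiotlb bounce
		 * segment when a bounce buffer may be in use.
		 */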
3669 		if (swiotlb_max_segment()) {
3670 			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
3671 						IO_TLB_SEGSIZE;
3672 			mmc->max_req_size = min(mmc->max_req_size,
3673 						max_req_size);
3674 		}
3675 	} else { /* PIO */
3676 		mmc->max_segs = SDHCI_MAX_SEGS;
3677 	}
3678 
3679 	/*
3680 	 * Maximum segment size. A single segment may cover the whole request.
3681 	 * When doing hardware scatter/gather, though, each descriptor entry
3682 	 * is limited to 64 KiB.
3683 	 */
3684 	if (host->flags & SDHCI_USE_ADMA) {
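		/*
		 * The ADMA2 descriptor length field is 16 bits wide and a
		 * value of zero means 65536 bytes; controllers that cannot
		 * handle such zero-length descriptors stop one byte short.
		 */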
3685 		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3686 			mmc->max_seg_size = 65535;
3687 		else
3688 			mmc->max_seg_size = 65536;
3689 	} else {
3690 		mmc->max_seg_size = mmc->max_req_size;
3691 	}
3692 
3693 	/*
3694 	 * Maximum block size. This varies from controller to controller and
3695 	 * is specified in the capabilities register.
3696 	 */
3697 	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
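		/* the shift below turns 2 into 512 << 2 = 2048 bytes */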
3698 		mmc->max_blk_size = 2;
3699 	} else {
3700 		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3701 				SDHCI_MAX_BLOCK_SHIFT;
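		/* 0, 1 and 2 encode 512, 1024 and 2048 bytes; 3 is reserved */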
3702 		if (mmc->max_blk_size >= 3) {
3703 			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3704 				mmc_hostname(mmc));
3705 			mmc->max_blk_size = 0;
3706 		}
3707 	}
3708 
3709 	mmc->max_blk_size = 512 << mmc->max_blk_size;
3710 
3711 	/*
3712 	 * Maximum block count.
3713 	 */
3714 	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3715 
3716 	return 0;
3717 
3718 unreg:
3719 	if (!IS_ERR(mmc->supply.vqmmc))
3720 		regulator_disable(mmc->supply.vqmmc);
3721 undma:
3722 	if (host->align_buffer)
3723 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3724 				  host->adma_table_sz, host->align_buffer,
3725 				  host->align_addr);
3726 	host->adma_table = NULL;
3727 	host->align_buffer = NULL;
3728 
3729 	return ret;
3730 }
3731 EXPORT_SYMBOL_GPL(sdhci_setup_host);
3732 
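/**
 * sdhci_cleanup_host - release resources acquired by sdhci_setup_host()
 * @host: SDHCI host instance
 *
 * Disables the vqmmc regulator and frees the ADMA descriptor table and
 * alignment bounce buffer, if they were allocated.
 */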
3733 void sdhci_cleanup_host(struct sdhci_host *host)
3734 {
3735 	struct mmc_host *mmc = host->mmc;
3736 
3737 	if (!IS_ERR(mmc->supply.vqmmc))
3738 		regulator_disable(mmc->supply.vqmmc);
3739 
3740 	if (host->align_buffer)
3741 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3742 				  host->adma_table_sz, host->align_buffer,
3743 				  host->align_addr);
3744 	host->adma_table = NULL;
3745 	host->align_buffer = NULL;
3746 }
3747 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
3748 
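/**
 * __sdhci_add_host - register a host already prepared by sdhci_setup_host()
 * @host: SDHCI host instance
 *
 * Sets up the completion tasklet, timers and interrupt handling, then
 * registers the host with the MMC core. On failure everything done here
 * is unwound; resources acquired by sdhci_setup_host() are left for the
 * caller to release via sdhci_cleanup_host().
 */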
3749 int __sdhci_add_host(struct sdhci_host *host)
3750 {
3751 	struct mmc_host *mmc = host->mmc;
3752 	int ret;
3753 
3754 	/*
3755 	 * Init the request-completion tasklet.
3756 	 */
3757 	tasklet_init(&host->finish_tasklet,
3758 		sdhci_tasklet_finish, (unsigned long)host);
3759 
3760 	timer_setup(&host->timer, sdhci_timeout_timer, 0);
3761 	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
3762 
3763 	init_waitqueue_head(&host->buf_ready_int);
3764 
3765 	sdhci_init(host, 0);
3766 
3767 	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3768 				   IRQF_SHARED,	mmc_hostname(mmc), host);
3769 	if (ret) {
3770 		pr_err("%s: Failed to request IRQ %d: %d\n",
3771 		       mmc_hostname(mmc), host->irq, ret);
3772 		goto untasklet;
3773 	}
3774 
3775 	ret = sdhci_led_register(host);
3776 	if (ret) {
3777 		pr_err("%s: Failed to register LED device: %d\n",
3778 		       mmc_hostname(mmc), ret);
3779 		goto unirq;
3780 	}
3781 
3782 	mmiowb();
3783 
3784 	ret = mmc_add_host(mmc);
3785 	if (ret)
3786 		goto unled;
3787 
3788 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3789 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3790 		(host->flags & SDHCI_USE_ADMA) ?
3791 		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3792 		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3793 
3794 	sdhci_enable_card_detection(host);
3795 
3796 	return 0;
3797 
3798 unled:
3799 	sdhci_led_unregister(host);
3800 unirq:
3801 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3802 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3803 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3804 	free_irq(host->irq, host);
3805 untasklet:
3806 	tasklet_kill(&host->finish_tasklet);
3807 
3808 	return ret;
3809 }
3810 EXPORT_SYMBOL_GPL(__sdhci_add_host);
3811 
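/**
 * sdhci_add_host - set up and register an SDHCI host in one call
 * @host: SDHCI host instance
 *
 * Convenience wrapper around sdhci_setup_host() and __sdhci_add_host()
 * that also calls sdhci_cleanup_host() when registration fails.
 */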
3812 int sdhci_add_host(struct sdhci_host *host)
3813 {
3814 	int ret;
3815 
3816 	ret = sdhci_setup_host(host);
3817 	if (ret)
3818 		return ret;
3819 
3820 	ret = __sdhci_add_host(host);
3821 	if (ret)
3822 		goto cleanup;
3823 
3824 	return 0;
3825 
3826 cleanup:
3827 	sdhci_cleanup_host(host);
3828 
3829 	return ret;
3830 }
3831 EXPORT_SYMBOL_GPL(sdhci_add_host);
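
/*
 * A minimal platform glue driver would typically drive the API above from
 * its probe routine roughly as sketched below. The ops table and resource
 * indices (my_sdhci_ops, IRQ/memory index 0) are illustrative assumptions,
 * not part of this file, and error handling is abbreviated:
 *
 *	struct sdhci_host *host;
 *	struct resource *res;
 *	int ret;
 *
 *	host = sdhci_alloc_host(&pdev->dev, 0);
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";
 *	host->ops = &my_sdhci_ops;
 *	host->irq = platform_get_irq(pdev, 0);
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 *	return ret;
 */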
3832 
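/**
 * sdhci_remove_host - unregister and tear down an SDHCI host
 * @host: SDHCI host instance
 * @dead: non-zero when the controller is no longer accessible; any
 *	  requests still in flight are errored out with -ENOMEDIUM and
 *	  the final controller reset is skipped
 */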
3833 void sdhci_remove_host(struct sdhci_host *host, int dead)
3834 {
3835 	struct mmc_host *mmc = host->mmc;
3836 	unsigned long flags;
3837 
3838 	if (dead) {
3839 		spin_lock_irqsave(&host->lock, flags);
3840 
3841 		host->flags |= SDHCI_DEVICE_DEAD;
3842 
3843 		if (sdhci_has_requests(host)) {
3844 			pr_err("%s: Controller removed during transfer!\n",
3845 				mmc_hostname(mmc));
3846 			sdhci_error_out_mrqs(host, -ENOMEDIUM);
3847 		}
3848 
3849 		spin_unlock_irqrestore(&host->lock, flags);
3850 	}
3851 
3852 	sdhci_disable_card_detection(host);
3853 
3854 	mmc_remove_host(mmc);
3855 
3856 	sdhci_led_unregister(host);
3857 
3858 	if (!dead)
3859 		sdhci_do_reset(host, SDHCI_RESET_ALL);
3860 
3861 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3862 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3863 	free_irq(host->irq, host);
3864 
3865 	del_timer_sync(&host->timer);
3866 	del_timer_sync(&host->data_timer);
3867 
3868 	tasklet_kill(&host->finish_tasklet);
3869 
3870 	if (!IS_ERR(mmc->supply.vqmmc))
3871 		regulator_disable(mmc->supply.vqmmc);
3872 
3873 	if (host->align_buffer)
3874 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3875 				  host->adma_table_sz, host->align_buffer,
3876 				  host->align_addr);
3877 
3878 	host->adma_table = NULL;
3879 	host->align_buffer = NULL;
3880 }
3881 EXPORT_SYMBOL_GPL(sdhci_remove_host);
3883 
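/**
 * sdhci_free_host - free a host allocated with sdhci_alloc_host()
 * @host: SDHCI host instance
 */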
3884 void sdhci_free_host(struct sdhci_host *host)
3885 {
3886 	mmc_free_host(host->mmc);
3887 }
3888 EXPORT_SYMBOL_GPL(sdhci_free_host);
3890 
3891 /*****************************************************************************\
3892  *                                                                           *
3893  * Driver init/exit                                                          *
3894  *                                                                           *
3895 \*****************************************************************************/
3896 
3897 static int __init sdhci_drv_init(void)
3898 {
3899 	pr_info(DRIVER_NAME
3900 		": Secure Digital Host Controller Interface driver\n");
3901 	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
3902 
3903 	return 0;
3904 }
3905 
3906 static void __exit sdhci_drv_exit(void)
3907 {
3908 }
3909 
3910 module_init(sdhci_drv_init);
3911 module_exit(sdhci_drv_exit);
3912 
3913 module_param(debug_quirks, uint, 0444);
3914 module_param(debug_quirks2, uint, 0444);
3915 
3916 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
3917 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
3918 MODULE_LICENSE("GPL");
3919 
3920 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
3921 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
3922