xref: /openbmc/u-boot/drivers/mmc/mmc.c (revision 6d9a98c5)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, Freescale Semiconductor, Inc
4  * Andy Fleming
5  *
6  * Based vaguely on the Linux code
7  */
8 
9 #include <config.h>
10 #include <common.h>
11 #include <command.h>
12 #include <dm.h>
13 #include <dm/device-internal.h>
14 #include <errno.h>
15 #include <mmc.h>
16 #include <part.h>
17 #include <power/regulator.h>
18 #include <malloc.h>
19 #include <memalign.h>
20 #include <linux/list.h>
21 #include <div64.h>
22 #include "mmc_private.h"
23 
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
28 #endif
29 
30 #if !CONFIG_IS_ENABLED(DM_MMC)
31 
32 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Non-DM fallback: polling the DAT0 line is not supported, callers fall
 * back to fixed delays when they see -ENOSYS.
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
37 #endif
38 
/* Weak board hook for write-protect detection; -1 means "no board
 * override", letting mmc_getwp() fall back to the controller driver.
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
43 
44 int mmc_getwp(struct mmc *mmc)
45 {
46 	int wp;
47 
48 	wp = board_mmc_getwp(mmc);
49 
50 	if (wp < 0) {
51 		if (mmc->cfg->ops->getwp)
52 			wp = mmc->cfg->ops->getwp(mmc);
53 		else
54 			wp = 0;
55 	}
56 
57 	return wp;
58 }
59 
/* Weak board hook for card-detect; -1 means "no board override",
 * letting mmc_getcd() fall back to the controller driver.
 */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
64 #endif
65 
66 #ifdef CONFIG_MMC_TRACE
/* Trace helper: log the command index and argument before sending */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}
72 
/*
 * Trace helper: log the outcome of a command. On failure only the error
 * code is printed; on success the response words are dumped in a layout
 * matching the response type.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* dump each 32-bit response word byte-wise, MSB first */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
124 
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
126 {
127 	int status;
128 
129 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 	printf("CURR STATE:%d\n", status);
131 }
132 #endif
133 
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 const char *mmc_mode_name(enum bus_mode mode)
136 {
137 	static const char *const names[] = {
138 	      [MMC_LEGACY]	= "MMC legacy",
139 	      [SD_LEGACY]	= "SD Legacy",
140 	      [MMC_HS]		= "MMC High Speed (26MHz)",
141 	      [SD_HS]		= "SD High Speed (50MHz)",
142 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
143 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
144 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
145 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
146 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
147 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
148 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
149 	      [MMC_HS_200]	= "HS200 (200MHz)",
150 	      [MMC_HS_400]	= "HS400 (200MHz)",
151 	};
152 
153 	if (mode >= MMC_MODES_END)
154 		return "Unknown mode";
155 	else
156 		return names[mode];
157 }
158 #endif
159 
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
161 {
162 	static const int freqs[] = {
163 	      [MMC_LEGACY]	= 25000000,
164 	      [SD_LEGACY]	= 25000000,
165 	      [MMC_HS]		= 26000000,
166 	      [SD_HS]		= 50000000,
167 	      [MMC_HS_52]	= 52000000,
168 	      [MMC_DDR_52]	= 52000000,
169 	      [UHS_SDR12]	= 25000000,
170 	      [UHS_SDR25]	= 50000000,
171 	      [UHS_SDR50]	= 100000000,
172 	      [UHS_DDR50]	= 50000000,
173 	      [UHS_SDR104]	= 208000000,
174 	      [MMC_HS_200]	= 200000000,
175 	      [MMC_HS_400]	= 200000000,
176 	};
177 
178 	if (mode == MMC_LEGACY)
179 		return mmc->legacy_speed;
180 	else if (mode >= MMC_MODES_END)
181 		return 0;
182 	else
183 		return freqs[mode];
184 }
185 
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
187 {
188 	mmc->selected_mode = mode;
189 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 		 mmc->tran_speed / 1000000);
193 	return 0;
194 }
195 
196 #if !CONFIG_IS_ENABLED(DM_MMC)
197 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
198 {
199 	int ret;
200 
201 	mmmc_trace_before_send(mmc, cmd);
202 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
203 	mmmc_trace_after_send(mmc, cmd, ret);
204 
205 	return ret;
206 }
207 #endif
208 
209 int mmc_send_status(struct mmc *mmc, int timeout)
210 {
211 	struct mmc_cmd cmd;
212 	int err, retries = 5;
213 
214 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
215 	cmd.resp_type = MMC_RSP_R1;
216 	if (!mmc_host_is_spi(mmc))
217 		cmd.cmdarg = mmc->rca << 16;
218 
219 	while (1) {
220 		err = mmc_send_cmd(mmc, &cmd, NULL);
221 		if (!err) {
222 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
223 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
224 			     MMC_STATE_PRG)
225 				break;
226 
227 			if (cmd.response[0] & MMC_STATUS_MASK) {
228 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
229 				pr_err("Status Error: 0x%08X\n",
230 				       cmd.response[0]);
231 #endif
232 				return -ECOMM;
233 			}
234 		} else if (--retries < 0)
235 			return err;
236 
237 		if (timeout-- <= 0)
238 			break;
239 
240 		udelay(1000);
241 	}
242 
243 	mmc_trace_state(mmc, &cmd);
244 	if (timeout <= 0) {
245 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
246 		pr_err("Timeout waiting card ready\n");
247 #endif
248 		return -ETIMEDOUT;
249 	}
250 
251 	return 0;
252 }
253 
254 int mmc_set_blocklen(struct mmc *mmc, int len)
255 {
256 	struct mmc_cmd cmd;
257 	int err;
258 
259 	if (mmc->ddr_mode)
260 		return 0;
261 
262 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
263 	cmd.resp_type = MMC_RSP_R1;
264 	cmd.cmdarg = len;
265 
266 	err = mmc_send_cmd(mmc, &cmd, NULL);
267 
268 #ifdef CONFIG_MMC_QUIRKS
269 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
270 		int retries = 4;
271 		/*
272 		 * It has been seen that SET_BLOCKLEN may fail on the first
273 		 * attempt, let's try a few more time
274 		 */
275 		do {
276 			err = mmc_send_cmd(mmc, &cmd, NULL);
277 			if (!err)
278 				break;
279 		} while (retries--);
280 	}
281 #endif
282 
283 	return err;
284 }
285 
286 #ifdef MMC_SUPPORTS_TUNING
/*
 * Fixed tuning block patterns from the SD/eMMC specifications: the card
 * returns these known sequences during CMD19/CMD21 so the host can tune
 * its sampling point. 64 bytes for 4-bit buses, 128 bytes for 8-bit.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
316 
/*
 * mmc_send_tuning() - execute one tuning command and verify the block.
 * @opcode:	tuning command index (CMD19 for SD, CMD21 for eMMC)
 * @cmd_error:	unused here; kept for API compatibility with callers
 *
 * Reads one tuning block and compares it against the reference pattern
 * for the current bus width.
 *
 * Return: 0 if the pattern matched, -EINVAL for unsupported bus widths,
 * -EIO on mismatch, or a transport error.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	/* only 4- and 8-bit buses have defined tuning patterns */
	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
354 #endif
355 
/*
 * mmc_read_blocks() - read @blkcnt blocks starting at @start into @dst.
 *
 * Uses CMD18 for multi-block and CMD17 for single-block reads. A
 * multi-block transfer is terminated with CMD12.
 *
 * Return: number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* high-capacity cards are block-addressed, others byte-addressed */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
396 
/*
 * mmc_bread() - block-device read entry point.
 *
 * Selects the requested hardware partition, bounds-checks the request
 * against the device size, programs the block length, then reads in
 * chunks of at most cfg->b_max blocks.
 *
 * Return: number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY builds lack the blk uclass hwpart selection helper */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* split the request into controller-sized chunks */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
453 
454 static int mmc_go_idle(struct mmc *mmc)
455 {
456 	struct mmc_cmd cmd;
457 	int err;
458 
459 	udelay(1000);
460 
461 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
462 	cmd.cmdarg = 0;
463 	cmd.resp_type = MMC_RSP_NONE;
464 
465 	err = mmc_send_cmd(mmc, &cmd, NULL);
466 
467 	if (err)
468 		return err;
469 
470 	udelay(2000);
471 
472 	return 0;
473 }
474 
475 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * mmc_switch_voltage() - switch SD signalling to @signal_voltage.
 *
 * For 3.3V only the host regulator is reconfigured. For 1.8V this sends
 * CMD11 and performs the clock-gating sequence required by the SD spec,
 * watching DAT[0:3] (when the host supports it) to confirm the card
 * followed the switch.
 *
 * Return: 0 on success, -EIO if the card flags an error, -ETIMEDOUT if
 * DAT[0:3] does not toggle as expected, or a transport error.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
535 #endif
536 
/*
 * sd_send_op_cond() - power up an SD card with CMD55 + ACMD41.
 *
 * Loops until the card reports power-up complete (OCR_BUSY set in the
 * response) or a ~1s budget expires. Requests high capacity (OCR_HCS)
 * for v2 cards and 1.8V signalling (OCR_S18R) when @uhs_en; if the card
 * accepts 1.8V, the voltage switch sequence is run. Stores the OCR and
 * derives the card version, capacity class and initial RCA.
 *
 * Return: 0 on success, -EOPNOTSUPP on timeout, or a transport error.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* card accepted 1.8V (S18A set) and reported power-up done */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
616 
/*
 * mmc_send_op_cond_iter() - send one CMD1 (SEND_OP_COND) iteration.
 * @use_arg:	when set (and not on SPI), request HCS plus the voltage
 *		window common to the host and the OCR read previously
 *
 * The OCR from the response is stored in mmc->ocr.
 * Return: 0 on success or a transport error.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}
637 
638 static int mmc_send_op_cond(struct mmc *mmc)
639 {
640 	int err, i;
641 
642 	/* Some cards seem to need this */
643 	mmc_go_idle(mmc);
644 
645  	/* Asking to the card its capabilities */
646 	for (i = 0; i < 2; i++) {
647 		err = mmc_send_op_cond_iter(mmc, i != 0);
648 		if (err)
649 			return err;
650 
651 		/* exit if not busy (flag seems to be inverted) */
652 		if (mmc->ocr & OCR_BUSY)
653 			break;
654 	}
655 	mmc->op_cond_pending = 1;
656 	return 0;
657 }
658 
/*
 * mmc_complete_op_cond() - finish the CMD1 power-up started by
 * mmc_send_op_cond().
 *
 * If the card is still busy, keeps issuing CMD1 until OCR_BUSY is set
 * or ~1s elapses. On SPI hosts the OCR is then re-read explicitly.
 * Derives the capacity class from OCR_HCS and assigns the default RCA.
 *
 * Return: 0 on success, -EOPNOTSUPP on timeout, or a transport error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* the real version is read from EXT_CSD later during startup */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
704 
705 
706 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
707 {
708 	struct mmc_cmd cmd;
709 	struct mmc_data data;
710 	int err;
711 
712 	/* Get the Card Status Register */
713 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
714 	cmd.resp_type = MMC_RSP_R1;
715 	cmd.cmdarg = 0;
716 
717 	data.dest = (char *)ext_csd;
718 	data.blocks = 1;
719 	data.blocksize = MMC_MAX_BLOCK_LEN;
720 	data.flags = MMC_DATA_READ;
721 
722 	err = mmc_send_cmd(mmc, &cmd, &data);
723 
724 	return err;
725 }
726 
727 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
728 {
729 	struct mmc_cmd cmd;
730 	int timeout = 1000;
731 	int retries = 3;
732 	int ret;
733 
734 	cmd.cmdidx = MMC_CMD_SWITCH;
735 	cmd.resp_type = MMC_RSP_R1b;
736 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
737 				 (index << 16) |
738 				 (value << 8);
739 
740 	while (retries > 0) {
741 		ret = mmc_send_cmd(mmc, &cmd, NULL);
742 
743 		/* Waiting for the ready status */
744 		if (!ret) {
745 			ret = mmc_send_status(mmc, timeout);
746 			return ret;
747 		}
748 
749 		retries--;
750 	}
751 
752 	return ret;
753 
754 }
755 
756 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * mmc_set_card_speed() - program the card's HS_TIMING byte for @mode.
 * @hsdowngrade:	set when leaving HS200/HS400 for HS, in which case
 *			the host clock is lowered before re-reading the
 *			EXT_CSD (the old, faster clock would be unreliable)
 *
 * For plain HS modes the EXT_CSD is read back to confirm the switch.
 *
 * Return: 0 on success, -EINVAL for an unsupported mode, -ENOTSUPP if
 * the card did not take the timing, or an error from mmc_switch().
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock are still running much faster than
	 * the supported HS mode clock, so we can not reliably read out
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
819 
/*
 * mmc_get_capabilities() - derive mmc->card_caps from the cached
 * EXT_CSD CARD_TYPE byte: bus widths plus HS/DDR/HS200/HS400 modes.
 *
 * Return: 0 on success, -ENOTSUPP if a v4+ card has no cached EXT_CSD.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
866 #endif
867 
868 static int mmc_set_capacity(struct mmc *mmc, int part_num)
869 {
870 	switch (part_num) {
871 	case 0:
872 		mmc->capacity = mmc->capacity_user;
873 		break;
874 	case 1:
875 	case 2:
876 		mmc->capacity = mmc->capacity_boot;
877 		break;
878 	case 3:
879 		mmc->capacity = mmc->capacity_rpmb;
880 		break;
881 	case 4:
882 	case 5:
883 	case 6:
884 	case 7:
885 		mmc->capacity = mmc->capacity_gp[part_num - 4];
886 		break;
887 	default:
888 		return -1;
889 	}
890 
891 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
892 
893 	return 0;
894 }
895 
896 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * mmc_boot_part_access_chk() - make sure the current bus mode is legal
 * for the partition about to be selected (HS200 is forbidden while a
 * boot/RPMB partition is active), and re-negotiate mode/width if not,
 * or if a better mode became available again.
 *
 * Return: 0 on success or an error from mmc_select_mode_and_width().
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		pr_debug("selected mode (%s) is forbidden for part %d\n",
			 mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		pr_debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
920 #else
/* Without HS200 support there is no forbidden mode to check for */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
926 #endif
927 
/*
 * mmc_switch_part() - select hardware partition @part_num via the
 * EXT_CSD PARTITION_CONFIG access bits, after validating the bus mode.
 *
 * Return: 0 on success or an error from the mode check/switch.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
951 
952 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/**
 * mmc_hwpart_config() - check, set or complete eMMC hardware partitioning
 * @mmc:	card to partition
 * @conf:	requested enhanced-area and GP partition layout
 * @mode:	MMC_HWPART_CONF_CHECK only validates the request,
 *		_SET additionally writes the layout to EXT_CSD,
 *		_COMPLETE also sets the write-once
 *		PARTITION_SETTING_COMPLETED flag
 *
 * Return: 0 on success, -EINVAL on bad arguments or alignment,
 * -EMEDIUMTYPE when the card cannot support the request, -EPERM when
 * the card is already partitioned, or an error from mmc_switch().
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed card: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* 24-bit little-endian field assembled from three EXT_CSD bytes */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1146 #endif
1147 
1148 #if !CONFIG_IS_ENABLED(DM_MMC)
1149 int mmc_getcd(struct mmc *mmc)
1150 {
1151 	int cd;
1152 
1153 	cd = board_mmc_getcd(mmc);
1154 
1155 	if (cd < 0) {
1156 		if (mmc->cfg->ops->getcd)
1157 			cd = mmc->cfg->ops->getcd(mmc);
1158 		else
1159 			cd = 1;
1160 	}
1161 
1162 	return cd;
1163 }
1164 #endif
1165 
1166 #if !CONFIG_IS_ENABLED(MMC_TINY)
1167 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1168 {
1169 	struct mmc_cmd cmd;
1170 	struct mmc_data data;
1171 
1172 	/* Switch the frequency */
1173 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1174 	cmd.resp_type = MMC_RSP_R1;
1175 	cmd.cmdarg = (mode << 31) | 0xffffff;
1176 	cmd.cmdarg &= ~(0xf << (group * 4));
1177 	cmd.cmdarg |= value << (group * 4);
1178 
1179 	data.dest = (char *)resp;
1180 	data.blocksize = 64;
1181 	data.blocks = 1;
1182 	data.flags = MMC_DATA_READ;
1183 
1184 	return mmc_send_cmd(mmc, &cmd, &data);
1185 }
1186 
/*
 * Discover an SD card's capabilities (bus widths and speed modes).
 *
 * Reads the SCR via ACMD51 to determine the SD spec version and 4-bit
 * support, then probes the high-speed (and, when enabled, UHS) function
 * groups with CMD6 in check mode.  Results accumulate in mmc->card_caps.
 * Returns 0 on success or a negative error code.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* Every SD card supports at least 1-bit legacy transfers */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	/* SPI mode has no SCR/CMD6 probing; legacy caps are all we claim */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* Some cards fail the first SCR read; allow a few retries */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field of the SCR selects the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit promotes a 2.00 card to 3.0x */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* Record high-speed capability when the card advertises it */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* Group 1 support bits (upper half of status word 3) list UHS modes */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1299 
1300 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1301 {
1302 	int err;
1303 
1304 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1305 	int speed;
1306 
1307 	/* SD version 1.00 and 1.01 does not support CMD 6 */
1308 	if (mmc->version == SD_VERSION_1_0)
1309 		return 0;
1310 
1311 	switch (mode) {
1312 	case SD_LEGACY:
1313 		speed = UHS_SDR12_BUS_SPEED;
1314 		break;
1315 	case SD_HS:
1316 		speed = HIGH_SPEED_BUS_SPEED;
1317 		break;
1318 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1319 	case UHS_SDR12:
1320 		speed = UHS_SDR12_BUS_SPEED;
1321 		break;
1322 	case UHS_SDR25:
1323 		speed = UHS_SDR25_BUS_SPEED;
1324 		break;
1325 	case UHS_SDR50:
1326 		speed = UHS_SDR50_BUS_SPEED;
1327 		break;
1328 	case UHS_DDR50:
1329 		speed = UHS_DDR50_BUS_SPEED;
1330 		break;
1331 	case UHS_SDR104:
1332 		speed = UHS_SDR104_BUS_SPEED;
1333 		break;
1334 #endif
1335 	default:
1336 		return -EINVAL;
1337 	}
1338 
1339 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1340 	if (err)
1341 		return err;
1342 
1343 	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1344 		return -ENOTSUPP;
1345 
1346 	return 0;
1347 }
1348 
1349 static int sd_select_bus_width(struct mmc *mmc, int w)
1350 {
1351 	int err;
1352 	struct mmc_cmd cmd;
1353 
1354 	if ((w != 4) && (w != 1))
1355 		return -EINVAL;
1356 
1357 	cmd.cmdidx = MMC_CMD_APP_CMD;
1358 	cmd.resp_type = MMC_RSP_R1;
1359 	cmd.cmdarg = mmc->rca << 16;
1360 
1361 	err = mmc_send_cmd(mmc, &cmd, NULL);
1362 	if (err)
1363 		return err;
1364 
1365 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1366 	cmd.resp_type = MMC_RSP_R1;
1367 	if (w == 4)
1368 		cmd.cmdarg = 2;
1369 	else if (w == 1)
1370 		cmd.cmdarg = 0;
1371 	err = mmc_send_cmd(mmc, &cmd, NULL);
1372 	if (err)
1373 		return err;
1374 
1375 	return 0;
1376 }
1377 #endif
1378 
1379 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status register (ACMD13) and decode the erase-related
 * fields: allocation unit size, erase timeout and erase offset, which
 * are cached in mmc->ssr for later erase operations.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> AU size in 512-byte blocks (SD spec table) */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD prefix before the application-specific SD_STATUS command */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* The read occasionally fails; retry a few times */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SSR is big-endian on the wire; convert in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE: codes above 9 are only defined from SD 3.0 onwards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE spans a word boundary in the SSR layout */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* Timeout/offset normalized to milliseconds per AU */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1443 #endif
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* Indexed by the TRANSFER_RATE unit field (CSD TRAN_SPEED bits 2:0) */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by the TIME_VALUE field (CSD TRAN_SPEED bits 6:3).
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1474 
1475 static inline int bus_width(uint cap)
1476 {
1477 	if (cap == MMC_MODE_8BIT)
1478 		return 8;
1479 	if (cap == MMC_MODE_4BIT)
1480 		return 4;
1481 	if (cap == MMC_MODE_1BIT)
1482 		return 1;
1483 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1484 	return 0;
1485 }
1486 
1487 #if !CONFIG_IS_ENABLED(DM_MMC)
1488 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM stub: tuning is only implemented via the DM_MMC ops path */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1493 #endif
1494 
/* Non-DM stub: no 74-clock init stream support without DM_MMC ops */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1498 
1499 static int mmc_set_ios(struct mmc *mmc)
1500 {
1501 	int ret = 0;
1502 
1503 	if (mmc->cfg->ops->set_ios)
1504 		ret = mmc->cfg->ops->set_ios(mmc);
1505 
1506 	return ret;
1507 }
1508 #endif
1509 
1510 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1511 {
1512 	if (!disable) {
1513 		if (clock > mmc->cfg->f_max)
1514 			clock = mmc->cfg->f_max;
1515 
1516 		if (clock < mmc->cfg->f_min)
1517 			clock = mmc->cfg->f_min;
1518 	}
1519 
1520 	mmc->clock = clock;
1521 	mmc->clk_disable = disable;
1522 
1523 	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1524 
1525 	return mmc_set_ios(mmc);
1526 }
1527 
/* Record the new host-side bus width and push it to the controller */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1534 
1535 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1536 /*
1537  * helper function to display the capabilities in a human
1538  * friendly manner. The capabilities include bus width and
1539  * supported modes.
1540  */
1541 void mmc_dump_capabilities(const char *text, uint caps)
1542 {
1543 	enum bus_mode mode;
1544 
1545 	pr_debug("%s: widths [", text);
1546 	if (caps & MMC_MODE_8BIT)
1547 		pr_debug("8, ");
1548 	if (caps & MMC_MODE_4BIT)
1549 		pr_debug("4, ");
1550 	if (caps & MMC_MODE_1BIT)
1551 		pr_debug("1, ");
1552 	pr_debug("\b\b] modes [");
1553 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1554 		if (MMC_CAP(mode) & caps)
1555 			pr_debug("%s, ", mmc_mode_name(mode));
1556 	pr_debug("\b\b]\n");
1557 }
1558 #endif
1559 
/*
 * One entry of a mode-preference table: a bus mode, the set of
 * MMC_MODE_xBIT width flags it may be tried with and, when tuning
 * support is compiled in, the tuning command opcode (0 = no tuning).
 */
struct mode_width_tuning {
	enum bus_mode mode;
	uint widths;
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;
#endif
};
1567 
1568 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1569 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1570 {
1571 	switch (voltage) {
1572 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1573 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1574 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1575 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1576 	}
1577 	return -EINVAL;
1578 }
1579 
1580 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1581 {
1582 	int err;
1583 
1584 	if (mmc->signal_voltage == signal_voltage)
1585 		return 0;
1586 
1587 	mmc->signal_voltage = signal_voltage;
1588 	err = mmc_set_ios(mmc);
1589 	if (err)
1590 		pr_debug("unable to set voltage (err %d)\n", err);
1591 
1592 	return err;
1593 }
1594 #else
/* Without MMC_IO_VOLTAGE support, voltage switching always "succeeds" */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1599 #endif
1600 
1601 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes ordered fastest-first; sd_select_mode_and_width() tries
 * them in this order and falls back until one works.  SD_LEGACY is
 * last so it always remains as the final fallback.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1639 
/* Iterate sd_modes_by_pref entries whose mode is present in @caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1645 
/*
 * Pick the fastest working SD bus mode/width combination.
 *
 * Walks sd_modes_by_pref (fastest first), trying 4-bit before 1-bit
 * for each mode.  For every candidate: program the card and host bus
 * width, switch the card speed, set the host clock, run tuning when
 * required, and verify with an SSR read.  Any failure reverts to
 * SD_LEGACY speed and continues with the next candidate.
 * Returns 0 on success, -ENOTSUPP when nothing worked.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* OCR S18R bit set means the card accepted 1.8V signalling */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	/* Without the 1.8V switch, none of the UHS modes are reachable */
	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR read doubles as a data-transfer check */
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1727 
1728 /*
1729  * read the compare the part of ext csd that is constant.
1730  * This can be used to check that the transfer is working
1731  * as expected.
1732  */
1733 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1734 {
1735 	int err;
1736 	const u8 *ext_csd = mmc->ext_csd;
1737 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1738 
1739 	if (mmc->version < MMC_VERSION_4)
1740 		return 0;
1741 
1742 	err = mmc_send_ext_csd(mmc, test_csd);
1743 	if (err)
1744 		return err;
1745 
1746 	/* Only compare read only fields */
1747 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1748 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1749 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1750 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1751 	    ext_csd[EXT_CSD_REV]
1752 		== test_csd[EXT_CSD_REV] &&
1753 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1754 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1755 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1756 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1757 		return 0;
1758 
1759 	return -EBADMSG;
1760 }
1761 
1762 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select the lowest signal voltage acceptable to both the card (per
 * its EXT_CSD card type bits for @mode) and the host (@allowed_mask).
 * Candidates are tried lowest-first (via ffs on the flag mask) until
 * one switch succeeds.  Returns 0 on success, -ENOTSUPP otherwise.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_400:
	case MMC_HS_200:
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
		    EXT_CSD_CARD_TYPE_HS400_1_8V))
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
		    EXT_CSD_CARD_TYPE_HS400_1_2V))
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		/* Slower modes run at the standard 3.3V level */
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	/* Try the remaining common voltages, lowest flag bit first */
	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc,  best_match))
			return 0;

		/* That voltage failed; exclude it and try the next one */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1802 #else
/* Without MMC_IO_VOLTAGE support, voltage selection always "succeeds" */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1808 #endif
1809 
/*
 * eMMC bus modes ordered fastest-first; mmc_select_mode_and_width()
 * tries them in this order.  MMC_LEGACY is last as the final fallback.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	{
		.mode = MMC_HS_400,
		.widths = MMC_MODE_8BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1842 
/* Iterate mmc_modes_by_pref entries whose mode is present in @caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1848 
/*
 * Maps each (capability flag, DDR) pair to the value to write into the
 * EXT_CSD BUS_WIDTH byte.  Ordered widest/DDR first so the for_each
 * iterator tries the best match before narrower fallbacks.
 */
static const struct ext_csd_bus_width {
	uint cap;
	bool is_ddr;
	uint ext_csd_bits;
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1860 
1861 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Enter HS400 mode via the mandated intermediate steps: tune in HS200,
 * drop to HS, enable the DDR 8-bit bus, then switch to HS400.  The
 * statement order follows the JEDEC-defined transition sequence and
 * must not be rearranged.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int err;

	/* Set timing to HS200 for tuning */
	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
	if (err)
		return err;

	/* configure the bus mode (host) */
	mmc_select_mode(mmc, MMC_HS_200);
	mmc_set_clock(mmc, mmc->tran_speed, false);

	/* execute tuning if needed */
	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
	if (err) {
		debug("tuning failed\n");
		return err;
	}

	/* Set back to HS */
	mmc_set_card_speed(mmc, MMC_HS, false);
	mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);

	/* Enable the DDR 8-bit bus width required by HS400 */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
	if (err)
		return err;

	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
	if (err)
		return err;

	/* Finally switch the host side to HS400 timing and clock */
	mmc_select_mode(mmc, MMC_HS_400);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return 0;
}
1902 #else
/* HS400 support not compiled in; report the mode as unsupported */
static int mmc_select_hs400(struct mmc *mmc)
{
	return -ENOTSUPP;
}
1907 #endif
1908 
/* Iterate ext_csd_bus_width entries matching @caps and the @ddr flag */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1914 
/*
 * Pick the fastest working eMMC bus mode/width combination.
 *
 * Walks mmc_modes_by_pref (fastest first), and for each mode tries the
 * matching bus widths (widest first).  Each attempt selects the lowest
 * workable signal voltage, programs card and host, tunes if required,
 * and verifies with an EXT_CSD re-read.  On failure the card is
 * dropped back to a 1-bit legacy bus and the next candidate is tried.
 * Returns 0 on success, -ENOTSUPP when nothing worked.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
	 * before doing anything else, since a transition from either of
	 * the HS200/HS400 mode directly to legacy mode is not supported.
	 */
	if (mmc->selected_mode == MMC_HS_200 ||
	    mmc->selected_mode == MMC_HS_400)
		mmc_set_card_speed(mmc, MMC_HS, true);
	else
#endif
		mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* Remember the voltage so it can be restored on error */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			if (mwt->mode == MMC_HS_400) {
				/* HS400 has its own multi-step entry sequence */
				err = mmc_select_hs400(mmc);
				if (err) {
					printf("Select HS400 failed %d\n", err);
					goto error;
				}
			} else {
				/* configure the bus speed (card) */
				err = mmc_set_card_speed(mmc, mwt->mode, false);
				if (err)
					goto error;

				/*
				 * configure the bus width AND the ddr mode
				 * (card). The host side will be taken care
				 * of in the next step
				 */
				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
					err = mmc_switch(mmc,
							 EXT_CSD_CMD_SET_NORMAL,
							 EXT_CSD_BUS_WIDTH,
							 ecbw->ext_csd_bits);
					if (err)
						goto error;
				}

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

				/* execute tuning if needed */
				if (mwt->tuning) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
2036 #endif
2037 
2038 #if CONFIG_IS_ENABLED(MMC_TINY)
2039 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2040 #endif
2041 
/*
 * MMC v4+ specific startup: read and cache the EXT_CSD, derive the
 * spec version, user/boot/RPMB/GP capacities, partition configuration
 * and erase/write-protect group sizes from it.
 *
 * Returns 0 on success (also for SD cards and pre-4.0 MMC, which have
 * no EXT_CSD) or a negative error code; on error the cached EXT_CSD
 * pointer is released and cleared.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	/* Indexed by the EXT_CSD_REV byte */
	static const u32 mmc_versions[] = {
		MMC_VERSION_4,
		MMC_VERSION_4_1,
		MMC_VERSION_4_2,
		MMC_VERSION_4_3,
		MMC_VERSION_4_4,
		MMC_VERSION_4_41,
		MMC_VERSION_4_5,
		MMC_VERSION_5_0,
		MMC_VERSION_5_1
	};

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* TINY builds use one static buffer instead of malloc */
	u8 *ext_csd = ext_csd_bkup;

	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd)
		memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = ext_csd;
#else
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check  ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
#endif
	/* Revisions beyond the table are unknown to this driver */
	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
		return -EINVAL;

	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];

	if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* BOOT/RPMB sizes are given in 128 KiB units (<< 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* Decode the four general-purpose partition sizes */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		/* size = mult * erase_grp * wp_grp * 512 KiB (<< 19) */
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

#ifndef CONFIG_SPL_BUILD
	/* Decode the enhanced user data area size and start address */
	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* High-capacity devices address in 512-byte sectors */
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}
#endif

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		/* Keep the local copy in sync with what was just written */
		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	}
#if CONFIG_IS_ENABLED(MMC_WRITE)
	else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
#endif

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	/* Drop the cached EXT_CSD; TINY's static buffer is not freed */
	if (mmc->ext_csd) {
#if !CONFIG_IS_ENABLED(MMC_TINY)
		free(mmc->ext_csd);
#endif
		mmc->ext_csd = NULL;
	}
	return err;
}
2239 
2240 static int mmc_startup(struct mmc *mmc)
2241 {
2242 	int err, i;
2243 	uint mult, freq;
2244 	u64 cmult, csize;
2245 	struct mmc_cmd cmd;
2246 	struct blk_desc *bdesc;
2247 
2248 #ifdef CONFIG_MMC_SPI_CRC_ON
2249 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2250 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2251 		cmd.resp_type = MMC_RSP_R1;
2252 		cmd.cmdarg = 1;
2253 		err = mmc_send_cmd(mmc, &cmd, NULL);
2254 		if (err)
2255 			return err;
2256 	}
2257 #endif
2258 
2259 	/* Put the Card in Identify Mode */
2260 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2261 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2262 	cmd.resp_type = MMC_RSP_R2;
2263 	cmd.cmdarg = 0;
2264 
2265 	err = mmc_send_cmd(mmc, &cmd, NULL);
2266 
2267 #ifdef CONFIG_MMC_QUIRKS
2268 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2269 		int retries = 4;
2270 		/*
2271 		 * It has been seen that SEND_CID may fail on the first
2272 		 * attempt, let's try a few more time
2273 		 */
2274 		do {
2275 			err = mmc_send_cmd(mmc, &cmd, NULL);
2276 			if (!err)
2277 				break;
2278 		} while (retries--);
2279 	}
2280 #endif
2281 
2282 	if (err)
2283 		return err;
2284 
2285 	memcpy(mmc->cid, cmd.response, 16);
2286 
2287 	/*
2288 	 * For MMC cards, set the Relative Address.
2289 	 * For SD cards, get the Relatvie Address.
2290 	 * This also puts the cards into Standby State
2291 	 */
2292 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2293 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2294 		cmd.cmdarg = mmc->rca << 16;
2295 		cmd.resp_type = MMC_RSP_R6;
2296 
2297 		err = mmc_send_cmd(mmc, &cmd, NULL);
2298 
2299 		if (err)
2300 			return err;
2301 
2302 		if (IS_SD(mmc))
2303 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2304 	}
2305 
2306 	/* Get the Card-Specific Data */
2307 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2308 	cmd.resp_type = MMC_RSP_R2;
2309 	cmd.cmdarg = mmc->rca << 16;
2310 
2311 	err = mmc_send_cmd(mmc, &cmd, NULL);
2312 
2313 	if (err)
2314 		return err;
2315 
2316 	mmc->csd[0] = cmd.response[0];
2317 	mmc->csd[1] = cmd.response[1];
2318 	mmc->csd[2] = cmd.response[2];
2319 	mmc->csd[3] = cmd.response[3];
2320 
2321 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2322 		int version = (cmd.response[0] >> 26) & 0xf;
2323 
2324 		switch (version) {
2325 		case 0:
2326 			mmc->version = MMC_VERSION_1_2;
2327 			break;
2328 		case 1:
2329 			mmc->version = MMC_VERSION_1_4;
2330 			break;
2331 		case 2:
2332 			mmc->version = MMC_VERSION_2_2;
2333 			break;
2334 		case 3:
2335 			mmc->version = MMC_VERSION_3;
2336 			break;
2337 		case 4:
2338 			mmc->version = MMC_VERSION_4;
2339 			break;
2340 		default:
2341 			mmc->version = MMC_VERSION_1_2;
2342 			break;
2343 		}
2344 	}
2345 
2346 	/* divide frequency by 10, since the mults are 10x bigger */
2347 	freq = fbase[(cmd.response[0] & 0x7)];
2348 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2349 
2350 	mmc->legacy_speed = freq * mult;
2351 	mmc_select_mode(mmc, MMC_LEGACY);
2352 
2353 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2354 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2355 #if CONFIG_IS_ENABLED(MMC_WRITE)
2356 
2357 	if (IS_SD(mmc))
2358 		mmc->write_bl_len = mmc->read_bl_len;
2359 	else
2360 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2361 #endif
2362 
2363 	if (mmc->high_capacity) {
2364 		csize = (mmc->csd[1] & 0x3f) << 16
2365 			| (mmc->csd[2] & 0xffff0000) >> 16;
2366 		cmult = 8;
2367 	} else {
2368 		csize = (mmc->csd[1] & 0x3ff) << 2
2369 			| (mmc->csd[2] & 0xc0000000) >> 30;
2370 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2371 	}
2372 
2373 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2374 	mmc->capacity_user *= mmc->read_bl_len;
2375 	mmc->capacity_boot = 0;
2376 	mmc->capacity_rpmb = 0;
2377 	for (i = 0; i < 4; i++)
2378 		mmc->capacity_gp[i] = 0;
2379 
2380 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2381 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2382 
2383 #if CONFIG_IS_ENABLED(MMC_WRITE)
2384 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2385 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2386 #endif
2387 
2388 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2389 		cmd.cmdidx = MMC_CMD_SET_DSR;
2390 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2391 		cmd.resp_type = MMC_RSP_NONE;
2392 		if (mmc_send_cmd(mmc, &cmd, NULL))
2393 			pr_warn("MMC: SET_DSR failed\n");
2394 	}
2395 
2396 	/* Select the card, and put it into Transfer Mode */
2397 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2398 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2399 		cmd.resp_type = MMC_RSP_R1;
2400 		cmd.cmdarg = mmc->rca << 16;
2401 		err = mmc_send_cmd(mmc, &cmd, NULL);
2402 
2403 		if (err)
2404 			return err;
2405 	}
2406 
2407 	/*
2408 	 * For SD, its erase group is always one sector
2409 	 */
2410 #if CONFIG_IS_ENABLED(MMC_WRITE)
2411 	mmc->erase_grp_size = 1;
2412 #endif
2413 	mmc->part_config = MMCPART_NOAVAILABLE;
2414 
2415 	err = mmc_startup_v4(mmc);
2416 	if (err)
2417 		return err;
2418 
2419 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2420 	if (err)
2421 		return err;
2422 
2423 #if CONFIG_IS_ENABLED(MMC_TINY)
2424 	mmc_set_clock(mmc, mmc->legacy_speed, false);
2425 	mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2426 	mmc_set_bus_width(mmc, 1);
2427 #else
2428 	if (IS_SD(mmc)) {
2429 		err = sd_get_capabilities(mmc);
2430 		if (err)
2431 			return err;
2432 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2433 	} else {
2434 		err = mmc_get_capabilities(mmc);
2435 		if (err)
2436 			return err;
2437 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2438 	}
2439 #endif
2440 	if (err)
2441 		return err;
2442 
2443 	mmc->best_mode = mmc->selected_mode;
2444 
2445 	/* Fix the block length for DDR mode */
2446 	if (mmc->ddr_mode) {
2447 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2448 #if CONFIG_IS_ENABLED(MMC_WRITE)
2449 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2450 #endif
2451 	}
2452 
2453 	/* fill in device description */
2454 	bdesc = mmc_get_blk_desc(mmc);
2455 	bdesc->lun = 0;
2456 	bdesc->hwpart = 0;
2457 	bdesc->type = 0;
2458 	bdesc->blksz = mmc->read_bl_len;
2459 	bdesc->log2blksz = LOG2(bdesc->blksz);
2460 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2461 #if !defined(CONFIG_SPL_BUILD) || \
2462 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2463 		!defined(CONFIG_USE_TINY_PRINTF))
2464 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2465 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2466 		(mmc->cid[3] >> 16) & 0xffff);
2467 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2468 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2469 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2470 		(mmc->cid[2] >> 24) & 0xff);
2471 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2472 		(mmc->cid[2] >> 16) & 0xf);
2473 #else
2474 	bdesc->vendor[0] = 0;
2475 	bdesc->product[0] = 0;
2476 	bdesc->revision[0] = 0;
2477 #endif
2478 
2479 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2480 	part_init(bdesc);
2481 #endif
2482 
2483 	return 0;
2484 }
2485 
2486 static int mmc_send_if_cond(struct mmc *mmc)
2487 {
2488 	struct mmc_cmd cmd;
2489 	int err;
2490 
2491 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2492 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2493 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2494 	cmd.resp_type = MMC_RSP_R7;
2495 
2496 	err = mmc_send_cmd(mmc, &cmd, NULL);
2497 
2498 	if (err)
2499 		return err;
2500 
2501 	if ((cmd.response[0] & 0xff) != 0xaa)
2502 		return -EOPNOTSUPP;
2503 	else
2504 		mmc->version = SD_VERSION_2;
2505 
2506 	return 0;
2507 }
2508 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * board-specific MMC power initializations.
 *
 * Weak no-op default; non-driver-model boards may override it to switch
 * on card power (called from mmc_power_init() below).
 */
__weak void board_mmc_power_init(void)
{
}
#endif
2515 
/*
 * Resolve the card's power supplies.
 *
 * With DM_MMC and DM_REGULATOR, look up the vmmc (card) and vqmmc (I/O
 * line) regulators from the device tree; a missing supply is only logged,
 * not treated as an error. Without driver model, fall back to the legacy
 * board hook. Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2541 
2542 /*
2543  * put the host in the initial state:
2544  * - turn on Vdd (card power supply)
2545  * - configure the bus width and clock to minimal values
2546  */
2547 static void mmc_set_initial_state(struct mmc *mmc)
2548 {
2549 	int err;
2550 
2551 	/* First try to set 3.3V. If it fails set to 1.8V */
2552 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2553 	if (err != 0)
2554 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2555 	if (err != 0)
2556 		pr_warn("mmc: failed to set signal voltage\n");
2557 
2558 	mmc_select_mode(mmc, MMC_LEGACY);
2559 	mmc_set_bus_width(mmc, 1);
2560 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2561 }
2562 
2563 static int mmc_power_on(struct mmc *mmc)
2564 {
2565 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2566 	if (mmc->vmmc_supply) {
2567 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2568 
2569 		if (ret) {
2570 			puts("Error enabling VMMC supply\n");
2571 			return ret;
2572 		}
2573 	}
2574 #endif
2575 	return 0;
2576 }
2577 
2578 static int mmc_power_off(struct mmc *mmc)
2579 {
2580 	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2581 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2582 	if (mmc->vmmc_supply) {
2583 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2584 
2585 		if (ret) {
2586 			pr_debug("Error disabling VMMC supply\n");
2587 			return ret;
2588 		}
2589 	}
2590 #endif
2591 	return 0;
2592 }
2593 
/*
 * Power-cycle the card: power off, hold the rail down briefly, then power
 * back on. Returns 0 on success or the first error encountered.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err = mmc_power_off(mmc);

	if (err)
		return err;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2608 
/*
 * Power up the card and negotiate its operating conditions
 * (CMD0 / CMD8 / ACMD41, falling back to CMD1 for (e)MMC).
 *
 * Returns 0 immediately if the card is already initialised, 0 on a
 * successful negotiation, or a negative error code (-EOPNOTSUPP if the
 * card answers neither the SD nor the MMC op-cond command).
 */
int mmc_get_op_cond(struct mmc *mmc)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. The result is deliberately discarded by the
	 * next assignment: a CMD8 failure only means the card is not SDv2.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed: power-cycle and retry without UHS */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
2692 
/*
 * Begin card initialisation: advertise the host's mandatory capabilities,
 * check that a card is present, and negotiate operating conditions.
 *
 * Returns 0 on success (init_in_progress is then set, to be finished by
 * mmc_complete_init()), -ENOMEDIUM when no card is detected, or a
 * negative error from the op-cond negotiation.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

#if !defined(CONFIG_MMC_BROKEN_CD)
	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#else
	/* Card-detect is unreliable: assume a card is always present */
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* Without driver model, a missing init op also counts as "no card" */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2729 
2730 static int mmc_complete_init(struct mmc *mmc)
2731 {
2732 	int err = 0;
2733 
2734 	mmc->init_in_progress = 0;
2735 	if (mmc->op_cond_pending)
2736 		err = mmc_complete_op_cond(mmc);
2737 
2738 	if (!err)
2739 		err = mmc_startup(mmc);
2740 	if (err)
2741 		mmc->has_init = 0;
2742 	else
2743 		mmc->has_init = 1;
2744 	return err;
2745 }
2746 
/*
 * Fully initialise a card: start (unless already in progress) and complete
 * the init sequence. A no-op returning 0 once the card has been
 * initialised. On failure the error and the elapsed time are logged.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* Publish this struct mmc via the device's uclass-private data */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	/* Skip the start phase if an earlier call already kicked it off */
	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2771 
/*
 * Record the Driver Stage Register value; it is programmed into the card
 * with MMC_CMD_SET_DSR during the next startup (see mmc_startup()).
 * Always returns 0.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2777 
/*
 * CPU-specific MMC initializations.
 *
 * Weak default returning -1 ("nothing registered"); SoC code may override
 * it. Used as the fallback when board_mmc_init() fails (see mmc_probe()).
 */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2783 
/*
 * board-specific MMC initializations.
 *
 * Weak default returning -1; a negative return makes the legacy
 * mmc_probe() fall back to cpu_mmc_init().
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2789 
/*
 * Set the preinit flag for this card; presumably consumed by
 * mmc_do_preinit() (called from mmc_initialize()) to start the slow init
 * sequence early -- confirm against mmc_do_preinit()'s implementation.
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2794 
#if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Probe every MMC device known to driver model. Individual probe failures
 * are logged but do not abort the scan; returns non-zero only when the MMC
 * uclass itself is unavailable.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/*
 * Legacy (non-DM) probe: give the board hook first shot at registering
 * controllers and fall back to the CPU hook if it reports failure.
 */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2833 
2834 int mmc_initialize(bd_t *bis)
2835 {
2836 	static int initialized = 0;
2837 	int ret;
2838 	if (initialized)	/* Avoid initializing mmc multiple times */
2839 		return 0;
2840 	initialized = 1;
2841 
2842 #if !CONFIG_IS_ENABLED(BLK)
2843 #if !CONFIG_IS_ENABLED(MMC_TINY)
2844 	mmc_list_init();
2845 #endif
2846 #endif
2847 	ret = mmc_probe(bis);
2848 	if (ret)
2849 		return ret;
2850 
2851 #ifndef CONFIG_SPL_BUILD
2852 	print_mmc_devices(',');
2853 #endif
2854 
2855 	mmc_do_preinit();
2856 	return 0;
2857 }
2858 
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device by setting
 * EXT_CSD_BKOPS_EN.
 *
 * Returns 0 on success (or if already enabled), -EMEDIUMTYPE when the
 * device does not advertise BKOPS support, or a negative error from the
 * EXT_CSD read/switch.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	/* Bit 0 of BKOPS_SUPPORT: device supports background operations */
	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	/*
	 * NOTE(review): BKOPS_EN is a one-time-programmable field on many
	 * devices, i.e. this cannot be undone -- confirm against the JEDEC
	 * eMMC spec for the targeted devices.
	 */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif
2892