xref: /openbmc/u-boot/drivers/mmc/mmc.c (revision 85231c08)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
25 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
26 static int mmc_power_cycle(struct mmc *mmc);
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
28 
#if CONFIG_IS_ENABLED(MMC_TINY)
/*
 * In MMC_TINY configurations there is exactly one statically allocated
 * MMC device; these helpers replace the full device-list lookups.
 */
static struct mmc mmc_static;

/* Return the single static device regardless of the requested number. */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/*
 * Start early initialization of the static device if the board
 * requested it via mmc_set_preinit() (forced on for FSL eSDHC
 * adapter identification).
 */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* The block descriptor is embedded directly in struct mmc in TINY mode. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
51 
#if !CONFIG_IS_ENABLED(DM_MMC)

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Non-DM stub: observing DAT0 requires driver support, so report
 * "not implemented" and let callers fall back to fixed delays.
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
#endif

/* Board hook for write-protect detection; -1 means "not implemented". */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Return the card's write-protect state: ask the board hook first,
 * then the host driver's getwp op, defaulting to 0 (not protected).
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook for card-detect; -1 means "not implemented". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif
87 
#ifdef CONFIG_MMC_TRACE
/* Log a command index and argument before it is sent to the card. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/*
 * Log the outcome of a command: the error code on failure, otherwise
 * the response words decoded according to the response type. R2
 * responses (CID/CSD, 128 bits) are additionally dumped byte by byte
 * in transmission order (MSB of each word first).
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				/* walk each response word from its high byte down */
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Print the card's current state extracted from an R1 status response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	/* CURRENT_STATE occupies bits [12:9] of the card status word */
	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
155 
#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Return a human-readable name for a bus mode, or "Unknown mode" for
 * values outside the enum range.
 */
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [SD_LEGACY]	= "SD Legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
#endif
180 
/*
 * Map a bus mode to its nominal maximum clock frequency in Hz.
 * MMC_LEGACY uses the card's negotiated legacy_speed instead of the
 * table; out-of-range modes yield 0.
 */
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [MMC_LEGACY]	= 25000000,
	      [SD_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
	      [UHS_SDR104]	= 208000000,
	      [MMC_HS_200]	= 200000000,
	};

	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}
205 
/*
 * Record @mode as the selected bus mode and derive the transfer speed
 * and DDR flag from it. Always returns 0.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
		 mmc->tran_speed / 1000000);
	return 0;
}
215 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Non-DM path: forward a command (and optional data transfer) to the
 * host driver's send_cmd op, wrapped in the optional trace hooks.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif
228 
/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready for
 * data and has left the programming state, or @timeout (in ~1 ms poll
 * iterations) expires. Up to 5 transport errors are tolerated before
 * giving up. Returns 0 when ready, -ECOMM if the card reports a status
 * error, -ETIMEDOUT on timeout, or the transport error code.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			/* done when ready-for-data and no longer programming */
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;

			if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				pr_err("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
273 
/*
 * Set the card's block length with CMD16 (SET_BLOCKLEN). Skipped in
 * DDR mode, where the block length is fixed and the command is
 * illegal. With CONFIG_MMC_QUIRKS, cards flagged with
 * MMC_QUIRK_RETRY_SET_BLOCKLEN get a few extra attempts since the
 * first one has been seen to fail. Returns 0 or the transport error.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
305 
#ifdef MMC_SUPPORTS_TUNING
/*
 * Reference tuning block patterns the card transmits during CMD19/CMD21
 * tuning, as defined by the SD/eMMC specifications, for 4-bit and 8-bit
 * bus widths respectively.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Issue one tuning command (@opcode, CMD19 or CMD21) and compare the
 * received block against the reference pattern for the current bus
 * width. Returns 0 on match, -EINVAL for an unsupported bus width,
 * -EIO on pattern mismatch, or the transport error. @cmd_error is
 * currently unused.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
#endif
375 
/*
 * Read @blkcnt blocks starting at LBA @start into @dst, using CMD18
 * (multi-block) or CMD17 (single block). High-capacity cards are
 * addressed by block number, older cards by byte offset. Returns the
 * number of blocks read, or 0 on any failure — including a failed
 * CMD12 (STOP_TRANSMISSION) after a multi-block read.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		/* open-ended multi-block read must be terminated explicitly */
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
416 
#if CONFIG_IS_ENABLED(BLK)
/*
 * Block-layer read entry point. Selects the requested hardware
 * partition, validates the range against the device size, sets the
 * block length and reads in chunks of at most cfg->b_max blocks.
 * Returns the number of blocks read, or 0 on any error.
 */
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* TINY builds have no block-layer hwpart bookkeeping to go through */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* read in host-limited chunks until everything is transferred */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
473 
474 static int mmc_go_idle(struct mmc *mmc)
475 {
476 	struct mmc_cmd cmd;
477 	int err;
478 
479 	udelay(1000);
480 
481 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
482 	cmd.cmdarg = 0;
483 	cmd.resp_type = MMC_RSP_NONE;
484 
485 	err = mmc_send_cmd(mmc, &cmd, NULL);
486 
487 	if (err)
488 		return err;
489 
490 	udelay(2000);
491 
492 	return 0;
493 }
494 
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD voltage switch sequence: for a 3.3V request just set
 * the host signal voltage; for 1.8V send CMD11, gate the clock, switch
 * the host regulator, then ungate and verify the card released DAT[0:3]
 * as required by the SD spec. Returns 0 on success, -EIO if the card
 * rejected CMD11, -ETIMEDOUT if DAT0 never reached the expected level,
 * or another transport/regulator error.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, true);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif
556 
/*
 * Negotiate the operating conditions of an SD card with ACMD41,
 * polling (up to ~1 s) until the card reports ready. Sets the card
 * version, OCR, high-capacity flag and RCA, and — when @uhs_en and the
 * card accepted the 1.8V request — performs the UHS voltage switch.
 * Returns 0 on success, -EOPNOTSUPP if the card never became ready,
 * or a transport error.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* card is ready and accepted the 1.8V (S18A) request */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
636 
/*
 * Send one CMD1 (SEND_OP_COND) iteration and store the returned OCR.
 * With @use_arg (and not in SPI mode) the argument advertises our
 * voltage window intersected with the card's, plus the sector-mode
 * access bit. Returns 0 or the transport error.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}
657 
658 static int mmc_send_op_cond(struct mmc *mmc)
659 {
660 	int err, i;
661 
662 	/* Some cards seem to need this */
663 	mmc_go_idle(mmc);
664 
665  	/* Asking to the card its capabilities */
666 	for (i = 0; i < 2; i++) {
667 		err = mmc_send_op_cond_iter(mmc, i != 0);
668 		if (err)
669 			return err;
670 
671 		/* exit if not busy (flag seems to be inverted) */
672 		if (mmc->ocr & OCR_BUSY)
673 			break;
674 	}
675 	mmc->op_cond_pending = 1;
676 	return 0;
677 }
678 
/*
 * Finish the CMD1 negotiation started by mmc_send_op_cond(): keep
 * polling (up to 1000 ms, measured with get_timer) until the card
 * leaves the busy state, read the OCR over SPI if applicable, and
 * record high-capacity support and the default RCA. Returns 0,
 * -EOPNOTSUPP on timeout, or a transport error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			/* busy flag is inverted: set means ready */
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* exact version is determined later from the EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
724 
725 
726 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
727 {
728 	struct mmc_cmd cmd;
729 	struct mmc_data data;
730 	int err;
731 
732 	/* Get the Card Status Register */
733 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
734 	cmd.resp_type = MMC_RSP_R1;
735 	cmd.cmdarg = 0;
736 
737 	data.dest = (char *)ext_csd;
738 	data.blocks = 1;
739 	data.blocksize = MMC_MAX_BLOCK_LEN;
740 	data.flags = MMC_DATA_READ;
741 
742 	err = mmc_send_cmd(mmc, &cmd, &data);
743 
744 	return err;
745 }
746 
747 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
748 {
749 	struct mmc_cmd cmd;
750 	int timeout = 1000;
751 	int retries = 3;
752 	int ret;
753 
754 	cmd.cmdidx = MMC_CMD_SWITCH;
755 	cmd.resp_type = MMC_RSP_R1b;
756 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
757 				 (index << 16) |
758 				 (value << 8);
759 
760 	while (retries > 0) {
761 		ret = mmc_send_cmd(mmc, &cmd, NULL);
762 
763 		/* Waiting for the ready status */
764 		if (!ret) {
765 			ret = mmc_send_status(mmc, timeout);
766 			return ret;
767 		}
768 
769 		retries--;
770 	}
771 
772 	return ret;
773 
774 }
775 
/*
 * Program the card's HS_TIMING field in EXT_CSD for the requested bus
 * mode. For plain high-speed modes the new setting is read back to
 * confirm the card actually enabled it. Returns 0 on success, -EINVAL
 * for an unsupported mode, -ENOTSUPP if the card did not take the
 * high-speed setting, or a transport error.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
818 
/*
 * Derive the card's capability mask (bus widths and speed modes) from
 * the cached EXT_CSD CARD_TYPE field. SPI hosts and pre-v4 cards get
 * only the legacy 1-bit capability. Returns 0, or -ENOTSUPP if the
 * EXT_CSD was expected but never read.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	/* only the low 6 CARD_TYPE bits describe speed classes we handle */
	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
859 
860 static int mmc_set_capacity(struct mmc *mmc, int part_num)
861 {
862 	switch (part_num) {
863 	case 0:
864 		mmc->capacity = mmc->capacity_user;
865 		break;
866 	case 1:
867 	case 2:
868 		mmc->capacity = mmc->capacity_boot;
869 		break;
870 	case 3:
871 		mmc->capacity = mmc->capacity_rpmb;
872 		break;
873 	case 4:
874 	case 5:
875 	case 6:
876 	case 7:
877 		mmc->capacity = mmc->capacity_gp[part_num - 4];
878 		break;
879 	default:
880 		return -1;
881 	}
882 
883 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
884 
885 	return 0;
886 }
887 
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Check whether the currently selected bus mode may be used with the
 * requested hardware partition: HS200 is forbidden for the boot/RPMB
 * partitions. If the mode is forbidden — or simply sub-optimal —
 * reselect the best permitted mode. Returns 0 or the reselect error.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		pr_debug("selected mode (%s) is forbidden for part %d\n",
			 mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		pr_debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
#else
/* Without HS200 support no mode is partition-restricted: nothing to do. */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
#endif
919 
/*
 * Switch the active hardware partition by rewriting the partition
 * access bits of EXT_CSD PARTITION_CONFIG, after confirming the
 * current bus mode is allowed for that partition. On success (or on
 * -ENODEV when returning to the raw device) the capacity and the
 * block descriptor's hwpart are updated. Returns 0 or an error code.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
943 
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user data area,
 * general-purpose partitions and write-reliability settings) from
 * @conf.
 *
 * @mode selects how far to go: MMC_HWPART_CONF_CHECK only validates
 * the request against the card's EXT_CSD; MMC_HWPART_CONF_SET also
 * writes the configuration; MMC_HWPART_CONF_COMPLETE additionally
 * sets PARTITION_SETTING_COMPLETED — a one-shot step that takes
 * effect only after a power cycle, which is why the in-memory mmc
 * struct is not updated here.
 *
 * Returns 0 on success, -EINVAL for bad arguments or misaligned
 * sizes, -EMEDIUMTYPE if the card cannot honour the request, -EPERM
 * if the card is already partitioned, or a transfer error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed card: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* 24-bit little-endian field: maximum total enhanced size */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
#endif
1139 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Return the card-detect state: board hook first, then the host
 * driver's getcd op, defaulting to 1 (card present).
 */
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;
	}

	return cd;
}
#endif
1157 
1158 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1159 {
1160 	struct mmc_cmd cmd;
1161 	struct mmc_data data;
1162 
1163 	/* Switch the frequency */
1164 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1165 	cmd.resp_type = MMC_RSP_R1;
1166 	cmd.cmdarg = (mode << 31) | 0xffffff;
1167 	cmd.cmdarg &= ~(0xf << (group * 4));
1168 	cmd.cmdarg |= value << (group * 4);
1169 
1170 	data.dest = (char *)resp;
1171 	data.blocksize = 64;
1172 	data.blocks = 1;
1173 	data.flags = MMC_DATA_READ;
1174 
1175 	return mmc_send_cmd(mmc, &cmd, &data);
1176 }
1177 
1178 
/*
 * Probe an SD card's capabilities and spec version and record them in
 * @mmc: reads the SCR via ACMD51 and, for cards newer than v1.0, the
 * CMD6 switch status to detect high-speed and (optionally) UHS modes.
 *
 * Fills mmc->version, mmc->scr[] and mmc->card_caps.
 * Returns 0 on success or a negative error code.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* Every SD card can at least do 1-bit legacy transfers */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	/* The SCR is an 8-byte big-endian register, read as a data block */
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		/* the SCR read sometimes fails transiently: retry a few times */
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* scr[0] bits [27:24]: SD physical-layer spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* bit 15 distinguishes a 3.0 card from a plain 2.0 card */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed is supported, record the capability */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* 5 bits at offset 16 of status word 3 advertise the UHS speeds */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1291 
1292 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1293 {
1294 	int err;
1295 
1296 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1297 	int speed;
1298 
1299 	switch (mode) {
1300 	case SD_LEGACY:
1301 		speed = UHS_SDR12_BUS_SPEED;
1302 		break;
1303 	case SD_HS:
1304 		speed = HIGH_SPEED_BUS_SPEED;
1305 		break;
1306 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1307 	case UHS_SDR12:
1308 		speed = UHS_SDR12_BUS_SPEED;
1309 		break;
1310 	case UHS_SDR25:
1311 		speed = UHS_SDR25_BUS_SPEED;
1312 		break;
1313 	case UHS_SDR50:
1314 		speed = UHS_SDR50_BUS_SPEED;
1315 		break;
1316 	case UHS_DDR50:
1317 		speed = UHS_DDR50_BUS_SPEED;
1318 		break;
1319 	case UHS_SDR104:
1320 		speed = UHS_SDR104_BUS_SPEED;
1321 		break;
1322 #endif
1323 	default:
1324 		return -EINVAL;
1325 	}
1326 
1327 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1328 	if (err)
1329 		return err;
1330 
1331 	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1332 		return -ENOTSUPP;
1333 
1334 	return 0;
1335 }
1336 
1337 int sd_select_bus_width(struct mmc *mmc, int w)
1338 {
1339 	int err;
1340 	struct mmc_cmd cmd;
1341 
1342 	if ((w != 4) && (w != 1))
1343 		return -EINVAL;
1344 
1345 	cmd.cmdidx = MMC_CMD_APP_CMD;
1346 	cmd.resp_type = MMC_RSP_R1;
1347 	cmd.cmdarg = mmc->rca << 16;
1348 
1349 	err = mmc_send_cmd(mmc, &cmd, NULL);
1350 	if (err)
1351 		return err;
1352 
1353 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1354 	cmd.resp_type = MMC_RSP_R1;
1355 	if (w == 4)
1356 		cmd.cmdarg = 2;
1357 	else if (w == 1)
1358 		cmd.cmdarg = 0;
1359 	err = mmc_send_cmd(mmc, &cmd, NULL);
1360 	if (err)
1361 		return err;
1362 
1363 	return 0;
1364 }
1365 
1366 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status register (ACMD13, a 64-byte data block) and
 * extract the allocation unit size and erase timing parameters into
 * mmc->ssr for later use by the erase code.
 *
 * Returns 0 on success (an invalid AU_SIZE field is only logged) or
 * a negative error once the retries are exhausted.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> allocation unit size in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* CMD55 prefix: the next command is application-specific */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* transient failures happen here; retry a few times */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* The register arrives big-endian; convert it word by word */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE: 4 bits at offset 12 of word 2 */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE spans the word-2/word-3 boundary */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* timeout/offset are stored in milliseconds */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1430 #endif
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* (combined with the 10x "multipliers" table below, the product is
 * the TRAN_SPEED clock rate in Hz)
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1439 
/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by the 4-bit time value field of the CSD TRAN_SPEED byte.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1461 
1462 static inline int bus_width(uint cap)
1463 {
1464 	if (cap == MMC_MODE_8BIT)
1465 		return 8;
1466 	if (cap == MMC_MODE_4BIT)
1467 		return 4;
1468 	if (cap == MMC_MODE_1BIT)
1469 		return 1;
1470 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1471 	return 0;
1472 }
1473 
1474 #if !CONFIG_IS_ENABLED(DM_MMC)
1475 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM stub: tuning is not implemented for legacy (non-DM) hosts. */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1480 #endif
1481 
/* Non-DM stub: no init stream is sent on legacy (non-DM) hosts. */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1485 
1486 static int mmc_set_ios(struct mmc *mmc)
1487 {
1488 	int ret = 0;
1489 
1490 	if (mmc->cfg->ops->set_ios)
1491 		ret = mmc->cfg->ops->set_ios(mmc);
1492 
1493 	return ret;
1494 }
1495 #endif
1496 
1497 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1498 {
1499 	if (!disable) {
1500 		if (clock > mmc->cfg->f_max)
1501 			clock = mmc->cfg->f_max;
1502 
1503 		if (clock < mmc->cfg->f_min)
1504 			clock = mmc->cfg->f_min;
1505 	}
1506 
1507 	mmc->clock = clock;
1508 	mmc->clk_disable = disable;
1509 
1510 	return mmc_set_ios(mmc);
1511 }
1512 
1513 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1514 {
1515 	mmc->bus_width = width;
1516 
1517 	return mmc_set_ios(mmc);
1518 }
1519 
1520 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1521 /*
1522  * helper function to display the capabilities in a human
1523  * friendly manner. The capabilities include bus width and
1524  * supported modes.
1525  */
1526 void mmc_dump_capabilities(const char *text, uint caps)
1527 {
1528 	enum bus_mode mode;
1529 
1530 	pr_debug("%s: widths [", text);
1531 	if (caps & MMC_MODE_8BIT)
1532 		pr_debug("8, ");
1533 	if (caps & MMC_MODE_4BIT)
1534 		pr_debug("4, ");
1535 	if (caps & MMC_MODE_1BIT)
1536 		pr_debug("1, ");
1537 	pr_debug("\b\b] modes [");
1538 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1539 		if (MMC_CAP(mode) & caps)
1540 			pr_debug("%s, ", mmc_mode_name(mode));
1541 	pr_debug("\b\b]\n");
1542 }
1543 #endif
1544 
/* One entry of a bus-mode preference table: a mode, the bus widths it
 * may be paired with, and (optionally) the tuning command it needs.
 */
struct mode_width_tuning {
	enum bus_mode mode;
	/* bitmask of MMC_MODE_xBIT widths usable in this mode */
	uint widths;
#ifdef MMC_SUPPORTS_TUNING
	/* tuning command index; 0 means the mode requires no tuning */
	uint tuning;
#endif
};
1552 
1553 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1554 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1555 {
1556 	switch (voltage) {
1557 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1558 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1559 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1560 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1561 	}
1562 	return -EINVAL;
1563 }
1564 
1565 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1566 {
1567 	int err;
1568 
1569 	if (mmc->signal_voltage == signal_voltage)
1570 		return 0;
1571 
1572 	mmc->signal_voltage = signal_voltage;
1573 	err = mmc_set_ios(mmc);
1574 	if (err)
1575 		pr_debug("unable to set voltage (err %d)\n", err);
1576 
1577 	return err;
1578 }
1579 #else
/* Stub when MMC_IO_VOLTAGE is disabled: voltage switching is a no-op. */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1584 #endif
1585 
/*
 * SD bus modes in decreasing order of preference: the selection loop
 * tries the fastest mode supported by both card and host first.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1623 
/* Walk sd_modes_by_pref, visiting only the modes present in @caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1629 
1630 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1631 {
1632 	int err;
1633 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1634 	const struct mode_width_tuning *mwt;
1635 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1636 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1637 #else
1638 	bool uhs_en = false;
1639 #endif
1640 	uint caps;
1641 
1642 #ifdef DEBUG
1643 	mmc_dump_capabilities("sd card", card_caps);
1644 	mmc_dump_capabilities("host", mmc->host_caps);
1645 #endif
1646 
1647 	/* Restrict card's capabilities by what the host can do */
1648 	caps = card_caps & mmc->host_caps;
1649 
1650 	if (!uhs_en)
1651 		caps &= ~UHS_CAPS;
1652 
1653 	for_each_sd_mode_by_pref(caps, mwt) {
1654 		uint *w;
1655 
1656 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1657 			if (*w & caps & mwt->widths) {
1658 				pr_debug("trying mode %s width %d (at %d MHz)\n",
1659 					 mmc_mode_name(mwt->mode),
1660 					 bus_width(*w),
1661 					 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1662 
1663 				/* configure the bus width (card + host) */
1664 				err = sd_select_bus_width(mmc, bus_width(*w));
1665 				if (err)
1666 					goto error;
1667 				mmc_set_bus_width(mmc, bus_width(*w));
1668 
1669 				/* configure the bus mode (card) */
1670 				err = sd_set_card_speed(mmc, mwt->mode);
1671 				if (err)
1672 					goto error;
1673 
1674 				/* configure the bus mode (host) */
1675 				mmc_select_mode(mmc, mwt->mode);
1676 				mmc_set_clock(mmc, mmc->tran_speed, false);
1677 
1678 #ifdef MMC_SUPPORTS_TUNING
1679 				/* execute tuning if needed */
1680 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1681 					err = mmc_execute_tuning(mmc,
1682 								 mwt->tuning);
1683 					if (err) {
1684 						pr_debug("tuning failed\n");
1685 						goto error;
1686 					}
1687 				}
1688 #endif
1689 
1690 #if CONFIG_IS_ENABLED(MMC_WRITE)
1691 				err = sd_read_ssr(mmc);
1692 				if (!err)
1693 					pr_warn("unable to read ssr\n");
1694 #endif
1695 				if (!err)
1696 					return 0;
1697 
1698 error:
1699 				/* revert to a safer bus speed */
1700 				mmc_select_mode(mmc, SD_LEGACY);
1701 				mmc_set_clock(mmc, mmc->tran_speed, false);
1702 			}
1703 		}
1704 	}
1705 
1706 	pr_err("unable to select a mode\n");
1707 	return -ENOTSUPP;
1708 }
1709 
1710 /*
1711  * read the compare the part of ext csd that is constant.
1712  * This can be used to check that the transfer is working
1713  * as expected.
1714  */
1715 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1716 {
1717 	int err;
1718 	const u8 *ext_csd = mmc->ext_csd;
1719 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1720 
1721 	if (mmc->version < MMC_VERSION_4)
1722 		return 0;
1723 
1724 	err = mmc_send_ext_csd(mmc, test_csd);
1725 	if (err)
1726 		return err;
1727 
1728 	/* Only compare read only fields */
1729 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1730 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1731 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1732 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1733 	    ext_csd[EXT_CSD_REV]
1734 		== test_csd[EXT_CSD_REV] &&
1735 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1736 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1737 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1738 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1739 		return 0;
1740 
1741 	return -EBADMSG;
1742 }
1743 
1744 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Select and program the lowest signalling voltage accepted by both
 * the card (derived from its EXT_CSD card type for @mode) and the
 * host (@allowed_mask), retrying with the next candidate when the
 * switch fails.
 *
 * NOTE(review): ffs() picks the lowest set bit, so this assumes the
 * MMC_SIGNAL_VOLTAGE_* flags place lower voltages in lower bits --
 * verify against the flag definitions in mmc.h.
 *
 * Returns 0 once a voltage is accepted, -ENOTSUPP otherwise.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_200:
		/* HS200 runs only at 1.8V or 1.2V signalling */
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		/* all other modes run at 3.3V signalling */
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc,  best_match))
			return 0;

		/* that voltage failed: drop it and try the next one */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1781 #else
/* Stub when MMC_IO_VOLTAGE is disabled: voltage selection is a no-op. */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1787 #endif
1788 
/*
 * eMMC bus modes in decreasing order of preference: the selection
 * loop tries the fastest mode supported by both card and host first.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1814 
/* Walk mmc_modes_by_pref, visiting only the modes present in @caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1820 
/*
 * Map a host width capability flag (plus SDR/DDR distinction) to the
 * EXT_CSD BUS_WIDTH value that selects it on the card.
 */
static const struct ext_csd_bus_width {
	uint cap;
	bool is_ddr;
	uint ext_csd_bits;
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1832 
/* Walk ext_csd_bus_width, visiting entries matching @ddr and @caps */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1838 
/*
 * Select the fastest eMMC bus mode and widest bus width supported by
 * both card (@card_caps) and host, program them on both sides, and
 * verify the configuration by re-reading the EXT_CSD.  On failure the
 * bus is reverted to 1-bit legacy before the next candidate is tried.
 *
 * Returns 0 on success (also for pre-v4 cards, which stay in legacy
 * mode), -ENOTSUPP if no mode works.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start from a safe legacy clock before reprogramming the bus */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					pr_debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1936 
/*
 * Post-CSD setup for eMMC v4+ cards: read the EXT_CSD, cache it in
 * mmc->ext_csd, and derive the spec version, capacities (user, boot,
 * RPMB, GP partitions), enhanced-area geometry, erase/WP group sizes
 * and write-reliability settings.  No-op for SD or pre-v4 cards.
 *
 * Returns 0 on success; on error the cached ext_csd is freed and the
 * error code returned.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	/* indexed by the EXT_CSD_REV byte of the EXT_CSD */
	static const u32 mmc_versions[] = {
		MMC_VERSION_4,
		MMC_VERSION_4_1,
		MMC_VERSION_4_2,
		MMC_VERSION_4_3,
		MMC_VERSION_4_4,
		MMC_VERSION_4_41,
		MMC_VERSION_4_5,
		MMC_VERSION_5_0,
		MMC_VERSION_5_1
	};

	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check  ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);

	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
		return -EINVAL;

	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];

	if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		/*
		 * NOTE(review): the bytes are promoted to int, so the
		 * "<< 24" term can sign-extend into the u64 when that
		 * byte is >= 0x80 -- consider casting to u32; verify.
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* boot/RPMB sizes are given in 128KiB units (multiplier << 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* each GP partition size is a 3-byte little-endian multiplier */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

#ifndef CONFIG_SPL_BUILD
	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity cards address in 512-byte sectors */
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}
#endif

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	}
#if CONFIG_IS_ENABLED(MMC_WRITE)
	else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
#endif

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	/* drop the cached copy so a later retry starts clean */
	if (mmc->ext_csd) {
		free(mmc->ext_csd);
		mmc->ext_csd = NULL;
	}
	return err;
}
2115 
2116 static int mmc_startup(struct mmc *mmc)
2117 {
2118 	int err, i;
2119 	uint mult, freq;
2120 	u64 cmult, csize;
2121 	struct mmc_cmd cmd;
2122 	struct blk_desc *bdesc;
2123 
2124 #ifdef CONFIG_MMC_SPI_CRC_ON
2125 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2126 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2127 		cmd.resp_type = MMC_RSP_R1;
2128 		cmd.cmdarg = 1;
2129 		err = mmc_send_cmd(mmc, &cmd, NULL);
2130 		if (err)
2131 			return err;
2132 	}
2133 #endif
2134 
2135 	/* Put the Card in Identify Mode */
2136 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2137 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2138 	cmd.resp_type = MMC_RSP_R2;
2139 	cmd.cmdarg = 0;
2140 
2141 	err = mmc_send_cmd(mmc, &cmd, NULL);
2142 
2143 #ifdef CONFIG_MMC_QUIRKS
2144 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2145 		int retries = 4;
2146 		/*
2147 		 * It has been seen that SEND_CID may fail on the first
2148 		 * attempt, let's try a few more time
2149 		 */
2150 		do {
2151 			err = mmc_send_cmd(mmc, &cmd, NULL);
2152 			if (!err)
2153 				break;
2154 		} while (retries--);
2155 	}
2156 #endif
2157 
2158 	if (err)
2159 		return err;
2160 
2161 	memcpy(mmc->cid, cmd.response, 16);
2162 
2163 	/*
2164 	 * For MMC cards, set the Relative Address.
2165 	 * For SD cards, get the Relatvie Address.
2166 	 * This also puts the cards into Standby State
2167 	 */
2168 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2169 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2170 		cmd.cmdarg = mmc->rca << 16;
2171 		cmd.resp_type = MMC_RSP_R6;
2172 
2173 		err = mmc_send_cmd(mmc, &cmd, NULL);
2174 
2175 		if (err)
2176 			return err;
2177 
2178 		if (IS_SD(mmc))
2179 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2180 	}
2181 
2182 	/* Get the Card-Specific Data */
2183 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2184 	cmd.resp_type = MMC_RSP_R2;
2185 	cmd.cmdarg = mmc->rca << 16;
2186 
2187 	err = mmc_send_cmd(mmc, &cmd, NULL);
2188 
2189 	if (err)
2190 		return err;
2191 
2192 	mmc->csd[0] = cmd.response[0];
2193 	mmc->csd[1] = cmd.response[1];
2194 	mmc->csd[2] = cmd.response[2];
2195 	mmc->csd[3] = cmd.response[3];
2196 
2197 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2198 		int version = (cmd.response[0] >> 26) & 0xf;
2199 
2200 		switch (version) {
2201 		case 0:
2202 			mmc->version = MMC_VERSION_1_2;
2203 			break;
2204 		case 1:
2205 			mmc->version = MMC_VERSION_1_4;
2206 			break;
2207 		case 2:
2208 			mmc->version = MMC_VERSION_2_2;
2209 			break;
2210 		case 3:
2211 			mmc->version = MMC_VERSION_3;
2212 			break;
2213 		case 4:
2214 			mmc->version = MMC_VERSION_4;
2215 			break;
2216 		default:
2217 			mmc->version = MMC_VERSION_1_2;
2218 			break;
2219 		}
2220 	}
2221 
2222 	/* divide frequency by 10, since the mults are 10x bigger */
2223 	freq = fbase[(cmd.response[0] & 0x7)];
2224 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2225 
2226 	mmc->legacy_speed = freq * mult;
2227 	mmc_select_mode(mmc, MMC_LEGACY);
2228 
2229 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2230 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2231 #if CONFIG_IS_ENABLED(MMC_WRITE)
2232 
2233 	if (IS_SD(mmc))
2234 		mmc->write_bl_len = mmc->read_bl_len;
2235 	else
2236 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2237 #endif
2238 
2239 	if (mmc->high_capacity) {
2240 		csize = (mmc->csd[1] & 0x3f) << 16
2241 			| (mmc->csd[2] & 0xffff0000) >> 16;
2242 		cmult = 8;
2243 	} else {
2244 		csize = (mmc->csd[1] & 0x3ff) << 2
2245 			| (mmc->csd[2] & 0xc0000000) >> 30;
2246 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2247 	}
2248 
2249 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2250 	mmc->capacity_user *= mmc->read_bl_len;
2251 	mmc->capacity_boot = 0;
2252 	mmc->capacity_rpmb = 0;
2253 	for (i = 0; i < 4; i++)
2254 		mmc->capacity_gp[i] = 0;
2255 
2256 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2257 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2258 
2259 #if CONFIG_IS_ENABLED(MMC_WRITE)
2260 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2261 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2262 #endif
2263 
2264 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2265 		cmd.cmdidx = MMC_CMD_SET_DSR;
2266 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2267 		cmd.resp_type = MMC_RSP_NONE;
2268 		if (mmc_send_cmd(mmc, &cmd, NULL))
2269 			pr_warn("MMC: SET_DSR failed\n");
2270 	}
2271 
2272 	/* Select the card, and put it into Transfer Mode */
2273 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2274 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2275 		cmd.resp_type = MMC_RSP_R1;
2276 		cmd.cmdarg = mmc->rca << 16;
2277 		err = mmc_send_cmd(mmc, &cmd, NULL);
2278 
2279 		if (err)
2280 			return err;
2281 	}
2282 
2283 	/*
2284 	 * For SD, its erase group is always one sector
2285 	 */
2286 #if CONFIG_IS_ENABLED(MMC_WRITE)
2287 	mmc->erase_grp_size = 1;
2288 #endif
2289 	mmc->part_config = MMCPART_NOAVAILABLE;
2290 
2291 	err = mmc_startup_v4(mmc);
2292 	if (err)
2293 		return err;
2294 
2295 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2296 	if (err)
2297 		return err;
2298 
2299 	if (IS_SD(mmc)) {
2300 		err = sd_get_capabilities(mmc);
2301 		if (err)
2302 			return err;
2303 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2304 	} else {
2305 		err = mmc_get_capabilities(mmc);
2306 		if (err)
2307 			return err;
2308 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2309 	}
2310 
2311 	if (err)
2312 		return err;
2313 
2314 	mmc->best_mode = mmc->selected_mode;
2315 
2316 	/* Fix the block length for DDR mode */
2317 	if (mmc->ddr_mode) {
2318 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2319 #if CONFIG_IS_ENABLED(MMC_WRITE)
2320 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2321 #endif
2322 	}
2323 
2324 	/* fill in device description */
2325 	bdesc = mmc_get_blk_desc(mmc);
2326 	bdesc->lun = 0;
2327 	bdesc->hwpart = 0;
2328 	bdesc->type = 0;
2329 	bdesc->blksz = mmc->read_bl_len;
2330 	bdesc->log2blksz = LOG2(bdesc->blksz);
2331 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2332 #if !defined(CONFIG_SPL_BUILD) || \
2333 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2334 		!defined(CONFIG_USE_TINY_PRINTF))
2335 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2336 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2337 		(mmc->cid[3] >> 16) & 0xffff);
2338 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2339 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2340 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2341 		(mmc->cid[2] >> 24) & 0xff);
2342 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2343 		(mmc->cid[2] >> 16) & 0xf);
2344 #else
2345 	bdesc->vendor[0] = 0;
2346 	bdesc->product[0] = 0;
2347 	bdesc->revision[0] = 0;
2348 #endif
2349 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2350 	part_init(bdesc);
2351 #endif
2352 
2353 	return 0;
2354 }
2355 
2356 static int mmc_send_if_cond(struct mmc *mmc)
2357 {
2358 	struct mmc_cmd cmd;
2359 	int err;
2360 
2361 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2362 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2363 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2364 	cmd.resp_type = MMC_RSP_R7;
2365 
2366 	err = mmc_send_cmd(mmc, &cmd, NULL);
2367 
2368 	if (err)
2369 		return err;
2370 
2371 	if ((cmd.response[0] & 0xff) != 0xaa)
2372 		return -EOPNOTSUPP;
2373 	else
2374 		mmc->version = SD_VERSION_2;
2375 
2376 	return 0;
2377 }
2378 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * board-specific MMC power initializations.
 *
 * Weak no-op default for legacy (non-driver-model) builds; boards may
 * override it to switch on card power before controller init.
 */
__weak void board_mmc_power_init(void)
{
}
#endif
2385 
/*
 * Discover how the card is powered.
 *
 * On driver-model builds with regulator support, look up the "vmmc-supply"
 * (card power) and "vqmmc-supply" (I/O voltage) regulators from the device
 * tree and cache them in the mmc struct. A missing regulator is not an
 * error - many boards hard-wire card power - so failures are only logged
 * at debug level. On legacy builds, defer to the board hook instead.
 *
 * Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	/* Card power supply; optional, see above */
	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	/* I/O line voltage supply; optional, needed for UHS voltage switch */
	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2411 
2412 /*
2413  * put the host in the initial state:
2414  * - turn on Vdd (card power supply)
2415  * - configure the bus width and clock to minimal values
2416  */
2417 static void mmc_set_initial_state(struct mmc *mmc)
2418 {
2419 	int err;
2420 
2421 	/* First try to set 3.3V. If it fails set to 1.8V */
2422 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2423 	if (err != 0)
2424 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2425 	if (err != 0)
2426 		pr_warn("mmc: failed to set signal voltage\n");
2427 
2428 	mmc_select_mode(mmc, MMC_LEGACY);
2429 	mmc_set_bus_width(mmc, 1);
2430 	mmc_set_clock(mmc, 0, false);
2431 }
2432 
2433 static int mmc_power_on(struct mmc *mmc)
2434 {
2435 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2436 	if (mmc->vmmc_supply) {
2437 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2438 
2439 		if (ret) {
2440 			puts("Error enabling VMMC supply\n");
2441 			return ret;
2442 		}
2443 	}
2444 #endif
2445 	return 0;
2446 }
2447 
2448 static int mmc_power_off(struct mmc *mmc)
2449 {
2450 	mmc_set_clock(mmc, 0, true);
2451 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2452 	if (mmc->vmmc_supply) {
2453 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2454 
2455 		if (ret) {
2456 			pr_debug("Error disabling VMMC supply\n");
2457 			return ret;
2458 		}
2459 	}
2460 #endif
2461 	return 0;
2462 }
2463 
/*
 * Power-cycle the card: power off, wait, power back on.
 *
 * Return: 0 on success, or the first error from power off/on. A failure
 * here typically means the board cannot cut card power, in which case
 * callers must avoid modes (e.g. UHS) that need a power cycle to recover.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err;

	err = mmc_power_off(mmc);
	if (err)
		return err;

	/*
	 * The SD spec asks for at least 1ms with power removed; wait 2ms
	 * to stay on the safe side before re-applying power.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2478 
/*
 * Start card initialization: power up the card, reset it and negotiate
 * its operating conditions (CMD8/ACMD41 for SD, CMD1 for MMC).
 *
 * This is the non-blocking first half of initialization; on success it
 * sets mmc->init_in_progress and mmc_complete_init() finishes the job.
 *
 * Return: 0 on success, -ENOMEDIUM if no card is present, -EOPNOTSUPP if
 * the card does not answer voltage selection, or another negative errno.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

#if !defined(CONFIG_MMC_BROKEN_CD)
	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#else
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* Legacy hosts without an init op cannot be used at all */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Nothing to do if a previous init already succeeded */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

	/* Re-entered (with UHS disabled) if the UHS negotiation fails */
retry:
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. NOTE(review): the return value is
	 * deliberately discarded (overwritten just below) - a CMD8 failure
	 * apparently only means the card is not SD 2.0, which the following
	 * ACMD41/CMD1 sequence handles; confirm against the SD spec.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed; power-cycle and retry without it */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2590 
2591 static int mmc_complete_init(struct mmc *mmc)
2592 {
2593 	int err = 0;
2594 
2595 	mmc->init_in_progress = 0;
2596 	if (mmc->op_cond_pending)
2597 		err = mmc_complete_op_cond(mmc);
2598 
2599 	if (!err)
2600 		err = mmc_startup(mmc);
2601 	if (err)
2602 		mmc->has_init = 0;
2603 	else
2604 		mmc->has_init = 1;
2605 	return err;
2606 }
2607 
2608 int mmc_init(struct mmc *mmc)
2609 {
2610 	int err = 0;
2611 	__maybe_unused unsigned start;
2612 #if CONFIG_IS_ENABLED(DM_MMC)
2613 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2614 
2615 	upriv->mmc = mmc;
2616 #endif
2617 	if (mmc->has_init)
2618 		return 0;
2619 
2620 	start = get_timer(0);
2621 
2622 	if (!mmc->init_in_progress)
2623 		err = mmc_start_init(mmc);
2624 
2625 	if (!err)
2626 		err = mmc_complete_init(mmc);
2627 	if (err)
2628 		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2629 
2630 	return err;
2631 }
2632 
/*
 * Store the Driver Stage Register (DSR) value for this card. Only caches
 * the value here; presumably it is programmed into the card during
 * startup elsewhere in this file. Always returns 0.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2638 
/*
 * CPU-specific MMC initializations.
 *
 * Weak default returning -1 (nothing registered); SoC code may override
 * it on legacy (non-DM) builds. Used as a fallback by mmc_probe() when
 * board_mmc_init() fails.
 */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2644 
/*
 * board-specific MMC initializations.
 *
 * Weak default returning -1 (nothing registered); boards override it on
 * legacy (non-DM) builds to register their controllers.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2650 
/*
 * Mark (or unmark) a device for pre-initialization: mmc_do_preinit()
 * starts the init sequence early for devices with this flag set.
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2655 
#if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Probe all MMC controllers known to the driver model.
 *
 * @bis is unused in the driver-model variant. Per-device probe failures
 * are logged but do not abort the scan, so one bad controller does not
 * hide the others. Always returns 0 once the uclass itself is available.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		/*
		 * NOTE(review): only -ENODEV terminates this loop; other
		 * errors are skipped and the next sequence number is tried -
		 * confirm this matches uclass_get_device_by_seq() semantics.
		 */
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/*
 * Legacy (non-DM) probe: let the board register its controllers; fall
 * back to the CPU hook if the board hook reports failure.
 */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2694 
2695 int mmc_initialize(bd_t *bis)
2696 {
2697 	static int initialized = 0;
2698 	int ret;
2699 	if (initialized)	/* Avoid initializing mmc multiple times */
2700 		return 0;
2701 	initialized = 1;
2702 
2703 #if !CONFIG_IS_ENABLED(BLK)
2704 #if !CONFIG_IS_ENABLED(MMC_TINY)
2705 	mmc_list_init();
2706 #endif
2707 #endif
2708 	ret = mmc_probe(bis);
2709 	if (ret)
2710 		return ret;
2711 
2712 #ifndef CONFIG_SPL_BUILD
2713 	print_mmc_devices(',');
2714 #endif
2715 
2716 	mmc_do_preinit();
2717 	return 0;
2718 }
2719 
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations (BKOPS) on an eMMC device.
 *
 * Reads EXT_CSD to verify the device supports BKOPS and that it is not
 * already enabled, then sets the BKOPS_EN byte via CMD6 (SWITCH).
 * NOTE(review): BKOPS_EN is a one-time programmable field per the eMMC
 * spec - once set it cannot be cleared; the prompts here are the only
 * guard.
 *
 * Return: 0 on success (or if already enabled), -EMEDIUMTYPE when the
 * device lacks BKOPS support, or the error from the EXT_CSD read/switch.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif
2753