xref: /openbmc/u-boot/drivers/mmc/mmc.c (revision 445277b9)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
25 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
26 static int mmc_power_cycle(struct mmc *mmc);
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
28 
29 #if CONFIG_IS_ENABLED(MMC_TINY)
30 static struct mmc mmc_static;
/*
 * MMC_TINY build: there is exactly one MMC device, so any dev_num
 * maps to the single static instance.
 */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}
35 
/*
 * Kick off early (pre-relocation) init of the single static MMC device
 * if it has been marked for preinit.
 */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* FSL eSDHC adapter identification requires preinit unconditionally */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}
45 
/* MMC_TINY build: the block descriptor is embedded in struct mmc. */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
50 #endif
51 
52 #if !CONFIG_IS_ENABLED(DM_MMC)
53 
54 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Non-DM stub: waiting on DAT0 is not implemented without a driver
 * model host; callers treat -ENOSYS as "fall back to a fixed delay".
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
59 #endif
60 
/*
 * Weak board hook for write-protect detection.
 * Returning -1 means "not handled by the board; ask the host driver".
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
65 
66 int mmc_getwp(struct mmc *mmc)
67 {
68 	int wp;
69 
70 	wp = board_mmc_getwp(mmc);
71 
72 	if (wp < 0) {
73 		if (mmc->cfg->ops->getwp)
74 			wp = mmc->cfg->ops->getwp(mmc);
75 		else
76 			wp = 0;
77 	}
78 
79 	return wp;
80 }
81 
/*
 * Weak board hook for card-detect.
 * Returning -1 means "not handled by the board; ask the host driver".
 */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
86 #endif
87 
88 #ifdef CONFIG_MMC_TRACE
/* Trace hook: log the command index and argument before it is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}
94 
/*
 * Trace hook: after a command completes, log either the error code or
 * the response, formatted according to the expected response type.
 * For R2 (136-bit CID/CSD) the four response words are also dumped
 * byte-by-byte in bus order.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				/* walk each 32-bit word MSB-first */
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
146 
/*
 * Trace hook: print the card's current state, extracted from the
 * CURRENT_STATE field (bits 12:9) of the card status response.
 */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
154 #endif
155 
156 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Map a bus_mode enum value to a human-readable name for logging.
 * Out-of-range values yield "Unknown mode" rather than indexing past
 * the table.
 */
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [SD_LEGACY]	= "SD Legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
179 #endif
180 
/*
 * Return the nominal bus clock (Hz) for a given bus mode.
 * MMC_LEGACY uses the per-card legacy_speed; modes compiled out by
 * Kconfig (and any out-of-range value) map to 0.
 */
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [SD_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
#ifdef MMC_SUPPORTS_TUNING
	      [UHS_SDR104]	= 208000000,
#endif
#endif
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	      [MMC_HS_200]	= 200000000,
#endif
	};

	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}
210 
/*
 * Record the chosen bus mode on the card and derive the transfer
 * clock and DDR flag from it. Always succeeds.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
	      mmc->tran_speed / 1000000);
	return 0;
}
220 
221 #if !CONFIG_IS_ENABLED(DM_MMC)
222 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
223 {
224 	int ret;
225 
226 	mmmc_trace_before_send(mmc, cmd);
227 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
228 	mmmc_trace_after_send(mmc, cmd, ret);
229 
230 	return ret;
231 }
232 #endif
233 
234 int mmc_send_status(struct mmc *mmc, int timeout)
235 {
236 	struct mmc_cmd cmd;
237 	int err, retries = 5;
238 
239 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
240 	cmd.resp_type = MMC_RSP_R1;
241 	if (!mmc_host_is_spi(mmc))
242 		cmd.cmdarg = mmc->rca << 16;
243 
244 	while (1) {
245 		err = mmc_send_cmd(mmc, &cmd, NULL);
246 		if (!err) {
247 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
248 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
249 			     MMC_STATE_PRG)
250 				break;
251 
252 			if (cmd.response[0] & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 				pr_err("Status Error: 0x%08X\n",
255 				       cmd.response[0]);
256 #endif
257 				return -ECOMM;
258 			}
259 		} else if (--retries < 0)
260 			return err;
261 
262 		if (timeout-- <= 0)
263 			break;
264 
265 		udelay(1000);
266 	}
267 
268 	mmc_trace_state(mmc, &cmd);
269 	if (timeout <= 0) {
270 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
271 		pr_err("Timeout waiting card ready\n");
272 #endif
273 		return -ETIMEDOUT;
274 	}
275 
276 	return 0;
277 }
278 
/*
 * Set the card's block length with CMD16. In DDR mode the block
 * length may not be changed, so this is a no-op there. With
 * CONFIG_MMC_QUIRKS, cards flagged MMC_QUIRK_RETRY_SET_BLOCKLEN get
 * a few extra attempts, as the first one is known to fail on some
 * parts.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
310 
311 #ifdef MMC_SUPPORTS_TUNING
/* 64-byte tuning block pattern for 4-bit bus width (per SD/eMMC specs). */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
322 
/* 128-byte tuning block pattern for 8-bit bus width (per eMMC spec). */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
341 
/*
 * Issue a tuning command (opcode CMD19/CMD21 chosen by the caller),
 * read back one tuning block and compare it against the reference
 * pattern for the current bus width.
 *
 * Returns 0 on a matching pattern, -EINVAL for unsupported bus
 * widths (only 4- and 8-bit have patterns), -EIO on a mismatch, or
 * the transport error. @cmd_error is currently unused.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
379 #endif
380 
/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * High-capacity cards are addressed by block number, others by byte
 * offset. Multi-block reads are terminated with CMD12.
 *
 * Returns the number of blocks read, or 0 on any failure (this is a
 * block-count contract, not an errno one).
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		/* close the open-ended multi-block transfer */
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
421 
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
	/*
	 * Block-layer read entry point. Validates the request against the
	 * device size, selects the requested hw partition, sets the block
	 * length, then reads in chunks capped at cfg->b_max.
	 * Returns the number of blocks read, or 0 on any failure.
	 */
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		/* NOTE: void-pointer arithmetic (GCC extension) */
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
478 
479 static int mmc_go_idle(struct mmc *mmc)
480 {
481 	struct mmc_cmd cmd;
482 	int err;
483 
484 	udelay(1000);
485 
486 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
487 	cmd.cmdarg = 0;
488 	cmd.resp_type = MMC_RSP_NONE;
489 
490 	err = mmc_send_cmd(mmc, &cmd, NULL);
491 
492 	if (err)
493 		return err;
494 
495 	udelay(2000);
496 
497 	return 0;
498 }
499 
500 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD voltage switch sequence (CMD11) to move signalling
 * to 1.8V; a request for 3.3V just sets the regulator directly.
 * The DAT0 handshake and clock-gating delays follow the SD spec's
 * voltage switch procedure; -ENOSYS from mmc_wait_dat0() means the
 * host cannot observe DAT0 and fixed delays are used instead.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, true);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
560 #endif
561 
/*
 * SD initialization: repeatedly issue ACMD41 (APP_CMD + SEND_OP_COND)
 * until the card leaves the busy state, advertising the host voltage
 * window and, when applicable, host-capacity (HCS) and 1.8V (S18R)
 * support. On success the OCR, high-capacity flag and RCA=0 are
 * recorded; for SPI hosts the OCR is re-read with CMD58.
 *
 * Returns 0, a transport error, or -EOPNOTSUPP after ~1s of polling.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	/* Cards that did not answer CMD8 earlier are SD 1.x */
	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* 0x41000000 = busy | S18A: card accepted the 1.8V request */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
641 
642 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
643 {
644 	struct mmc_cmd cmd;
645 	int err;
646 
647 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
648 	cmd.resp_type = MMC_RSP_R3;
649 	cmd.cmdarg = 0;
650 	if (use_arg && !mmc_host_is_spi(mmc))
651 		cmd.cmdarg = OCR_HCS |
652 			(mmc->cfg->voltages &
653 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
654 			(mmc->ocr & OCR_ACCESS_MODE);
655 
656 	err = mmc_send_cmd(mmc, &cmd, NULL);
657 	if (err)
658 		return err;
659 	mmc->ocr = cmd.response[0];
660 	return 0;
661 }
662 
/*
 * Start eMMC initialization: reset the card and issue up to two CMD1
 * iterations (probe, then with the real argument). Completion of the
 * busy-wait is deferred to mmc_complete_op_cond(), so op_cond_pending
 * is set before returning.
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Ask the card for its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
683 
/*
 * Finish the CMD1 handshake started by mmc_send_op_cond(): if the
 * card was still busy, re-poll CMD1 (with the full argument) for up
 * to 1 s. SPI hosts then read the OCR via CMD58. On success the
 * high-capacity flag is derived from OCR_HCS and the default eMMC
 * RCA of 1 is assigned.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* exact version is determined later from the CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
729 
730 
731 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
732 {
733 	struct mmc_cmd cmd;
734 	struct mmc_data data;
735 	int err;
736 
737 	/* Get the Card Status Register */
738 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
739 	cmd.resp_type = MMC_RSP_R1;
740 	cmd.cmdarg = 0;
741 
742 	data.dest = (char *)ext_csd;
743 	data.blocks = 1;
744 	data.blocksize = MMC_MAX_BLOCK_LEN;
745 	data.flags = MMC_DATA_READ;
746 
747 	err = mmc_send_cmd(mmc, &cmd, &data);
748 
749 	return err;
750 }
751 
752 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
753 {
754 	struct mmc_cmd cmd;
755 	int timeout = 1000;
756 	int retries = 3;
757 	int ret;
758 
759 	cmd.cmdidx = MMC_CMD_SWITCH;
760 	cmd.resp_type = MMC_RSP_R1b;
761 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
762 				 (index << 16) |
763 				 (value << 8);
764 
765 	while (retries > 0) {
766 		ret = mmc_send_cmd(mmc, &cmd, NULL);
767 
768 		/* Waiting for the ready status */
769 		if (!ret) {
770 			ret = mmc_send_status(mmc, timeout);
771 			return ret;
772 		}
773 
774 		retries--;
775 	}
776 
777 	return ret;
778 
779 }
780 
/*
 * Program the card's HS_TIMING field in EXT_CSD for the requested bus
 * mode. For the plain high-speed modes (HS/HS_52) the EXT_CSD is read
 * back to verify the card actually switched; -ENOTSUPP is returned if
 * HS_TIMING reads back as zero. HS200 verification is skipped here
 * because the read-back would have to happen at the new timing.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
823 
/*
 * Derive the card's capability mask (mmc->card_caps) from the cached
 * EXT_CSD: bus widths, high-speed, DDR52 and (if enabled) HS200.
 * SPI hosts and pre-v4 cards only get the legacy 1-bit baseline.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	/* low 6 bits of DEVICE_TYPE describe supported speed grades */
	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
864 
865 static int mmc_set_capacity(struct mmc *mmc, int part_num)
866 {
867 	switch (part_num) {
868 	case 0:
869 		mmc->capacity = mmc->capacity_user;
870 		break;
871 	case 1:
872 	case 2:
873 		mmc->capacity = mmc->capacity_boot;
874 		break;
875 	case 3:
876 		mmc->capacity = mmc->capacity_rpmb;
877 		break;
878 	case 4:
879 	case 5:
880 	case 6:
881 	case 7:
882 		mmc->capacity = mmc->capacity_gp[part_num - 4];
883 		break;
884 	default:
885 		return -1;
886 	}
887 
888 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
889 
890 	return 0;
891 }
892 
893 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Before switching to a boot/RPMB partition, check whether the
 * currently selected bus mode is allowed there (HS200 is forbidden
 * outside the user area). If the mode is forbidden — or simply not
 * the best available — re-run mode selection with the forbidden
 * capability masked out.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		debug("selected mode (%s) is forbidden for part %d\n",
		      mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
917 #else
/* Without HS200 support no mode is partition-restricted: no-op. */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
923 #endif
924 
/*
 * Select hardware partition @part_num by rewriting the PARTITION_ACCESS
 * bits of EXT_CSD PART_CONF, after first making sure the current bus
 * mode is permitted there. The cached capacity/hwpart bookkeeping is
 * refreshed on success — and also on -ENODEV when returning to the
 * user area, so the descriptor still reflects the raw device.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
948 
949 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write reliability) according to @conf.
 *
 * @mode selects how far to go:
 *   MMC_HWPART_CONF_CHECK    - validate only, write nothing
 *   MMC_HWPART_CONF_SET      - write sizes/attributes but do not commit
 *   MMC_HWPART_CONF_COMPLETE - also write WR_REL_SET and set
 *                              PARTITION_SETTING_COMPLETED (one-shot!)
 *
 * Returns 0 on success, -EINVAL for bad arguments/alignment,
 * -EMEDIUMTYPE when the card lacks a required capability, -EPERM if
 * the card is already partitioned, or a transport error.
 *
 * NOTE: the committed configuration only takes effect after a power
 * cycle, so the in-memory partition state is deliberately left alone.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte addressing: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* 24-bit little-endian field spread over three EXT_CSD bytes */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1143 #endif
1144 
1145 #if !CONFIG_IS_ENABLED(DM_MMC)
1146 int mmc_getcd(struct mmc *mmc)
1147 {
1148 	int cd;
1149 
1150 	cd = board_mmc_getcd(mmc);
1151 
1152 	if (cd < 0) {
1153 		if (mmc->cfg->ops->getcd)
1154 			cd = mmc->cfg->ops->getcd(mmc);
1155 		else
1156 			cd = 1;
1157 	}
1158 
1159 	return cd;
1160 }
1161 #endif
1162 
1163 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1164 {
1165 	struct mmc_cmd cmd;
1166 	struct mmc_data data;
1167 
1168 	/* Switch the frequency */
1169 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1170 	cmd.resp_type = MMC_RSP_R1;
1171 	cmd.cmdarg = (mode << 31) | 0xffffff;
1172 	cmd.cmdarg &= ~(0xf << (group * 4));
1173 	cmd.cmdarg |= value << (group * 4);
1174 
1175 	data.dest = (char *)resp;
1176 	data.blocksize = 64;
1177 	data.blocks = 1;
1178 	data.flags = MMC_DATA_READ;
1179 
1180 	return mmc_send_cmd(mmc, &cmd, &data);
1181 }
1182 
1183 
/*
 * sd_get_capabilities() - discover an SD card's supported modes and widths
 *
 * Reads the SCR (ACMD51) to determine the SD spec version and 4-bit
 * support, then, for v1.10+ cards, uses CMD6 switch status to detect
 * high-speed and (when compiled in) UHS bus modes.  Results accumulate
 * in mmc->card_caps.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* Every SD card can at least do 1-bit legacy transfers */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* The SCR read occasionally fails; retry a few times */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field (top word, bits 27:24 here) gives the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit distinguishes 3.0 from 2.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll the CMD6 status until the high-speed function is not busy */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* Record high-speed capability if the card advertises it */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* 5-bit field carrying the supported UHS access modes */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1296 
/*
 * sd_set_card_speed() - switch the SD card to the timing for @mode
 *
 * Maps the generic bus_mode onto the CMD6 access-mode (group 1)
 * function number and performs the switch, then checks the returned
 * switch status to confirm the card actually selected that function.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOTSUPP when
 * the card refused the switch, or a transfer error.
 */
static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;

	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	int speed;

	/* Translate bus mode to CMD6 group-1 function number */
	switch (mode) {
	case SD_LEGACY:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case SD_HS:
		speed = HIGH_SPEED_BUS_SPEED;
		break;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	case UHS_SDR12:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case UHS_SDR25:
		speed = UHS_SDR25_BUS_SPEED;
		break;
	case UHS_SDR50:
		speed = UHS_SDR50_BUS_SPEED;
		break;
	case UHS_DDR50:
		speed = UHS_DDR50_BUS_SPEED;
		break;
	case UHS_SDR104:
		speed = UHS_SDR104_BUS_SPEED;
		break;
#endif
	default:
		return -EINVAL;
	}

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
	if (err)
		return err;

	/* The status echoes the function actually selected; reject mismatch */
	if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
		return -ENOTSUPP;

	return 0;
}
1341 
1342 int sd_select_bus_width(struct mmc *mmc, int w)
1343 {
1344 	int err;
1345 	struct mmc_cmd cmd;
1346 
1347 	if ((w != 4) && (w != 1))
1348 		return -EINVAL;
1349 
1350 	cmd.cmdidx = MMC_CMD_APP_CMD;
1351 	cmd.resp_type = MMC_RSP_R1;
1352 	cmd.cmdarg = mmc->rca << 16;
1353 
1354 	err = mmc_send_cmd(mmc, &cmd, NULL);
1355 	if (err)
1356 		return err;
1357 
1358 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1359 	cmd.resp_type = MMC_RSP_R1;
1360 	if (w == 4)
1361 		cmd.cmdarg = 2;
1362 	else if (w == 1)
1363 		cmd.cmdarg = 0;
1364 	err = mmc_send_cmd(mmc, &cmd, NULL);
1365 	if (err)
1366 		return err;
1367 
1368 	return 0;
1369 }
1370 
1371 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * sd_read_ssr() - read the SD Status register (ACMD13)
 *
 * Extracts the allocation unit size and the erase timeout/offset
 * parameters used later to estimate erase durations.  An unparsable
 * AU size is only reported via debug(), not treated as an error.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> allocation unit size in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD13 must be preceded by APP_CMD */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* The 512-bit SD status arrives big-endian; normalize each word */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	/* AU codes above 9 are only defined from SD 3.0 onwards */
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* ms per AU-sized erase, plus a fixed offset in ms */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1435 #endif
/*
 * Frequency bases for the CSD TRAN_SPEED field, pre-divided by 10 so
 * the 10x multiplier table below can be applied using integer math
 * only (no floating point needed on any platform).
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1466 
1467 static inline int bus_width(uint cap)
1468 {
1469 	if (cap == MMC_MODE_8BIT)
1470 		return 8;
1471 	if (cap == MMC_MODE_4BIT)
1472 		return 4;
1473 	if (cap == MMC_MODE_1BIT)
1474 		return 1;
1475 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1476 	return 0;
1477 }
1478 
1479 #if !CONFIG_IS_ENABLED(DM_MMC)
1480 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM fallback: tuning needs driver-model support, so report it
 * as unsupported.
 */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1485 #endif
1486 
/* Non-DM fallback: intentionally a no-op. */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1490 
1491 static int mmc_set_ios(struct mmc *mmc)
1492 {
1493 	int ret = 0;
1494 
1495 	if (mmc->cfg->ops->set_ios)
1496 		ret = mmc->cfg->ops->set_ios(mmc);
1497 
1498 	return ret;
1499 }
1500 #endif
1501 
1502 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1503 {
1504 	if (clock > mmc->cfg->f_max)
1505 		clock = mmc->cfg->f_max;
1506 
1507 	if (clock < mmc->cfg->f_min)
1508 		clock = mmc->cfg->f_min;
1509 
1510 	mmc->clock = clock;
1511 	mmc->clk_disable = disable;
1512 
1513 	return mmc_set_ios(mmc);
1514 }
1515 
/* Record the new bus width and push it to the host controller. */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1522 
1523 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1524 /*
1525  * helper function to display the capabilities in a human
1526  * friendly manner. The capabilities include bus width and
1527  * supported modes.
1528  */
1529 void mmc_dump_capabilities(const char *text, uint caps)
1530 {
1531 	enum bus_mode mode;
1532 
1533 	printf("%s: widths [", text);
1534 	if (caps & MMC_MODE_8BIT)
1535 		printf("8, ");
1536 	if (caps & MMC_MODE_4BIT)
1537 		printf("4, ");
1538 	if (caps & MMC_MODE_1BIT)
1539 		printf("1, ");
1540 	printf("\b\b] modes [");
1541 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1542 		if (MMC_CAP(mode) & caps)
1543 			printf("%s, ", mmc_mode_name(mode));
1544 	printf("\b\b]\n");
1545 }
1546 #endif
1547 
1548 struct mode_width_tuning {
1549 	enum bus_mode mode;
1550 	uint widths;
1551 #ifdef MMC_SUPPORTS_TUNING
1552 	uint tuning;
1553 #endif
1554 };
1555 
1556 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1557 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1558 {
1559 	switch (voltage) {
1560 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1561 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1562 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1563 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1564 	}
1565 	return -EINVAL;
1566 }
1567 
1568 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1569 {
1570 	int err;
1571 
1572 	if (mmc->signal_voltage == signal_voltage)
1573 		return 0;
1574 
1575 	mmc->signal_voltage = signal_voltage;
1576 	err = mmc_set_ios(mmc);
1577 	if (err)
1578 		debug("unable to set voltage (err %d)\n", err);
1579 
1580 	return err;
1581 }
1582 #else
/* I/O voltage switching compiled out: report success unconditionally. */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1587 #endif
1588 
/*
 * SD bus modes ordered by preference, fastest first; mode selection
 * walks this table and keeps the first entry that both the card and
 * the host support.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Iterate over sd_modes_by_pref, skipping modes absent from @caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1632 
1633 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1634 {
1635 	int err;
1636 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1637 	const struct mode_width_tuning *mwt;
1638 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1639 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1640 #else
1641 	bool uhs_en = false;
1642 #endif
1643 	uint caps;
1644 
1645 #ifdef DEBUG
1646 	mmc_dump_capabilities("sd card", card_caps);
1647 	mmc_dump_capabilities("host", mmc->host_caps);
1648 #endif
1649 
1650 	/* Restrict card's capabilities by what the host can do */
1651 	caps = card_caps & mmc->host_caps;
1652 
1653 	if (!uhs_en)
1654 		caps &= ~UHS_CAPS;
1655 
1656 	for_each_sd_mode_by_pref(caps, mwt) {
1657 		uint *w;
1658 
1659 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1660 			if (*w & caps & mwt->widths) {
1661 				debug("trying mode %s width %d (at %d MHz)\n",
1662 				      mmc_mode_name(mwt->mode),
1663 				      bus_width(*w),
1664 				      mmc_mode2freq(mmc, mwt->mode) / 1000000);
1665 
1666 				/* configure the bus width (card + host) */
1667 				err = sd_select_bus_width(mmc, bus_width(*w));
1668 				if (err)
1669 					goto error;
1670 				mmc_set_bus_width(mmc, bus_width(*w));
1671 
1672 				/* configure the bus mode (card) */
1673 				err = sd_set_card_speed(mmc, mwt->mode);
1674 				if (err)
1675 					goto error;
1676 
1677 				/* configure the bus mode (host) */
1678 				mmc_select_mode(mmc, mwt->mode);
1679 				mmc_set_clock(mmc, mmc->tran_speed, false);
1680 
1681 #ifdef MMC_SUPPORTS_TUNING
1682 				/* execute tuning if needed */
1683 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1684 					err = mmc_execute_tuning(mmc,
1685 								 mwt->tuning);
1686 					if (err) {
1687 						debug("tuning failed\n");
1688 						goto error;
1689 					}
1690 				}
1691 #endif
1692 
1693 #if CONFIG_IS_ENABLED(MMC_WRITE)
1694 				err = sd_read_ssr(mmc);
1695 				if (!err)
1696 					pr_warn("unable to read ssr\n");
1697 #endif
1698 				if (!err)
1699 					return 0;
1700 
1701 error:
1702 				/* revert to a safer bus speed */
1703 				mmc_select_mode(mmc, SD_LEGACY);
1704 				mmc_set_clock(mmc, mmc->tran_speed, false);
1705 			}
1706 		}
1707 	}
1708 
1709 	printf("unable to select a mode\n");
1710 	return -ENOTSUPP;
1711 }
1712 
1713 /*
1714  * read the compare the part of ext csd that is constant.
1715  * This can be used to check that the transfer is working
1716  * as expected.
1717  */
1718 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1719 {
1720 	int err;
1721 	const u8 *ext_csd = mmc->ext_csd;
1722 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1723 
1724 	if (mmc->version < MMC_VERSION_4)
1725 		return 0;
1726 
1727 	err = mmc_send_ext_csd(mmc, test_csd);
1728 	if (err)
1729 		return err;
1730 
1731 	/* Only compare read only fields */
1732 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1733 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1734 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1735 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1736 	    ext_csd[EXT_CSD_REV]
1737 		== test_csd[EXT_CSD_REV] &&
1738 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1739 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1740 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1741 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1742 		return 0;
1743 
1744 	return -EBADMSG;
1745 }
1746 
1747 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - pick an I/O voltage usable for @mode
 *
 * Builds the set of signal voltages the card advertises for @mode
 * (from its EXT_CSD card type bits), intersects it with @allowed_mask
 * and tries candidates until mmc_set_signal_voltage() succeeds.
 *
 * Returns 0 on success, -ENOTSUPP when no common voltage works.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		/*
		 * ffs() selects the lowest set bit; "lowest voltage first"
		 * assumes lower-voltage flags occupy lower bit positions —
		 * TODO confirm against the mmc_voltage flag definitions.
		 */
		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc,  best_match))
			return 0;

		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1784 #else
/* Voltage selection compiled out: every mode runs at the current level. */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1790 #endif
1791 
/*
 * eMMC bus modes ordered by preference, fastest first; mode selection
 * keeps the first entry supported by both card and host.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Iterate over mmc_modes_by_pref, skipping modes absent from @caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))

/*
 * EXT_CSD bus-width settings, widest first; DDR entries are listed
 * separately so for_each_supported_width() can filter on is_ddr.
 */
static const struct ext_csd_bus_width {
	uint cap;
	bool is_ddr;
	uint ext_csd_bits;
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};

/* Iterate the width entries in @caps that match the requested DDR-ness */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1841 
/*
 * mmc_select_mode_and_width() - pick and apply the best eMMC bus config
 *
 * Walks mmc_modes_by_pref (fastest first) and, per mode, the matching
 * bus widths (widest first).  Each candidate is programmed into the
 * card via EXT_CSD switches and into the host, then verified by
 * re-reading the EXT_CSD; on failure the bus reverts to 1-bit legacy
 * before the next candidate is tried.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start probing from the safe legacy speed */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1939 
1940 static int mmc_startup_v4(struct mmc *mmc)
1941 {
1942 	int err, i;
1943 	u64 capacity;
1944 	bool has_parts = false;
1945 	bool part_completed;
1946 	static const u32 mmc_versions[] = {
1947 		MMC_VERSION_4,
1948 		MMC_VERSION_4_1,
1949 		MMC_VERSION_4_2,
1950 		MMC_VERSION_4_3,
1951 		MMC_VERSION_4_41,
1952 		MMC_VERSION_4_5,
1953 		MMC_VERSION_5_0,
1954 		MMC_VERSION_5_1
1955 	};
1956 
1957 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1958 
1959 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1960 		return 0;
1961 
1962 	/* check  ext_csd version and capacity */
1963 	err = mmc_send_ext_csd(mmc, ext_csd);
1964 	if (err)
1965 		goto error;
1966 
1967 	/* store the ext csd for future reference */
1968 	if (!mmc->ext_csd)
1969 		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1970 	if (!mmc->ext_csd)
1971 		return -ENOMEM;
1972 	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
1973 
1974 	if (ext_csd[EXT_CSD_REV] > ARRAY_SIZE(mmc_versions))
1975 		return -EINVAL;
1976 
1977 	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1978 
1979 	if (mmc->version >= MMC_VERSION_4_2) {
1980 		/*
1981 		 * According to the JEDEC Standard, the value of
1982 		 * ext_csd's capacity is valid if the value is more
1983 		 * than 2GB
1984 		 */
1985 		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1986 				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1987 				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1988 				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1989 		capacity *= MMC_MAX_BLOCK_LEN;
1990 		if ((capacity >> 20) > 2 * 1024)
1991 			mmc->capacity_user = capacity;
1992 	}
1993 
1994 	/* The partition data may be non-zero but it is only
1995 	 * effective if PARTITION_SETTING_COMPLETED is set in
1996 	 * EXT_CSD, so ignore any data if this bit is not set,
1997 	 * except for enabling the high-capacity group size
1998 	 * definition (see below).
1999 	 */
2000 	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2001 			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2002 
2003 	/* store the partition info of emmc */
2004 	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2005 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2006 	    ext_csd[EXT_CSD_BOOT_MULT])
2007 		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2008 	if (part_completed &&
2009 	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2010 		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2011 
2012 	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2013 
2014 	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2015 
2016 	for (i = 0; i < 4; i++) {
2017 		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2018 		uint mult = (ext_csd[idx + 2] << 16) +
2019 			(ext_csd[idx + 1] << 8) + ext_csd[idx];
2020 		if (mult)
2021 			has_parts = true;
2022 		if (!part_completed)
2023 			continue;
2024 		mmc->capacity_gp[i] = mult;
2025 		mmc->capacity_gp[i] *=
2026 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2027 		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2028 		mmc->capacity_gp[i] <<= 19;
2029 	}
2030 
2031 #ifndef CONFIG_SPL_BUILD
2032 	if (part_completed) {
2033 		mmc->enh_user_size =
2034 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2035 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2036 			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2037 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2038 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2039 		mmc->enh_user_size <<= 19;
2040 		mmc->enh_user_start =
2041 			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2042 			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2043 			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2044 			ext_csd[EXT_CSD_ENH_START_ADDR];
2045 		if (mmc->high_capacity)
2046 			mmc->enh_user_start <<= 9;
2047 	}
2048 #endif
2049 
2050 	/*
2051 	 * Host needs to enable ERASE_GRP_DEF bit if device is
2052 	 * partitioned. This bit will be lost every time after a reset
2053 	 * or power off. This will affect erase size.
2054 	 */
2055 	if (part_completed)
2056 		has_parts = true;
2057 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2058 	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2059 		has_parts = true;
2060 	if (has_parts) {
2061 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2062 				 EXT_CSD_ERASE_GROUP_DEF, 1);
2063 
2064 		if (err)
2065 			goto error;
2066 
2067 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2068 	}
2069 
2070 	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2071 #if CONFIG_IS_ENABLED(MMC_WRITE)
2072 		/* Read out group size from ext_csd */
2073 		mmc->erase_grp_size =
2074 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2075 #endif
2076 		/*
2077 		 * if high capacity and partition setting completed
2078 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2079 		 * JEDEC Standard JESD84-B45, 6.2.4
2080 		 */
2081 		if (mmc->high_capacity && part_completed) {
2082 			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2083 				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2084 				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2085 				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2086 			capacity *= MMC_MAX_BLOCK_LEN;
2087 			mmc->capacity_user = capacity;
2088 		}
2089 	}
2090 #if CONFIG_IS_ENABLED(MMC_WRITE)
2091 	else {
2092 		/* Calculate the group size from the csd value. */
2093 		int erase_gsz, erase_gmul;
2094 
2095 		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2096 		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2097 		mmc->erase_grp_size = (erase_gsz + 1)
2098 			* (erase_gmul + 1);
2099 	}
2100 #endif
2101 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2102 	mmc->hc_wp_grp_size = 1024
2103 		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2104 		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2105 #endif
2106 
2107 	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2108 
2109 	return 0;
2110 error:
2111 	if (mmc->ext_csd) {
2112 		free(mmc->ext_csd);
2113 		mmc->ext_csd = NULL;
2114 	}
2115 	return err;
2116 }
2117 
2118 static int mmc_startup(struct mmc *mmc)
2119 {
2120 	int err, i;
2121 	uint mult, freq;
2122 	u64 cmult, csize;
2123 	struct mmc_cmd cmd;
2124 	struct blk_desc *bdesc;
2125 
2126 #ifdef CONFIG_MMC_SPI_CRC_ON
2127 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2128 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2129 		cmd.resp_type = MMC_RSP_R1;
2130 		cmd.cmdarg = 1;
2131 		err = mmc_send_cmd(mmc, &cmd, NULL);
2132 		if (err)
2133 			return err;
2134 	}
2135 #endif
2136 
2137 	/* Put the Card in Identify Mode */
2138 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2139 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2140 	cmd.resp_type = MMC_RSP_R2;
2141 	cmd.cmdarg = 0;
2142 
2143 	err = mmc_send_cmd(mmc, &cmd, NULL);
2144 
2145 #ifdef CONFIG_MMC_QUIRKS
2146 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2147 		int retries = 4;
2148 		/*
2149 		 * It has been seen that SEND_CID may fail on the first
2150 		 * attempt, let's try a few more time
2151 		 */
2152 		do {
2153 			err = mmc_send_cmd(mmc, &cmd, NULL);
2154 			if (!err)
2155 				break;
2156 		} while (retries--);
2157 	}
2158 #endif
2159 
2160 	if (err)
2161 		return err;
2162 
2163 	memcpy(mmc->cid, cmd.response, 16);
2164 
2165 	/*
2166 	 * For MMC cards, set the Relative Address.
2167 	 * For SD cards, get the Relatvie Address.
2168 	 * This also puts the cards into Standby State
2169 	 */
2170 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2171 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2172 		cmd.cmdarg = mmc->rca << 16;
2173 		cmd.resp_type = MMC_RSP_R6;
2174 
2175 		err = mmc_send_cmd(mmc, &cmd, NULL);
2176 
2177 		if (err)
2178 			return err;
2179 
2180 		if (IS_SD(mmc))
2181 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2182 	}
2183 
2184 	/* Get the Card-Specific Data */
2185 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2186 	cmd.resp_type = MMC_RSP_R2;
2187 	cmd.cmdarg = mmc->rca << 16;
2188 
2189 	err = mmc_send_cmd(mmc, &cmd, NULL);
2190 
2191 	if (err)
2192 		return err;
2193 
2194 	mmc->csd[0] = cmd.response[0];
2195 	mmc->csd[1] = cmd.response[1];
2196 	mmc->csd[2] = cmd.response[2];
2197 	mmc->csd[3] = cmd.response[3];
2198 
2199 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2200 		int version = (cmd.response[0] >> 26) & 0xf;
2201 
2202 		switch (version) {
2203 		case 0:
2204 			mmc->version = MMC_VERSION_1_2;
2205 			break;
2206 		case 1:
2207 			mmc->version = MMC_VERSION_1_4;
2208 			break;
2209 		case 2:
2210 			mmc->version = MMC_VERSION_2_2;
2211 			break;
2212 		case 3:
2213 			mmc->version = MMC_VERSION_3;
2214 			break;
2215 		case 4:
2216 			mmc->version = MMC_VERSION_4;
2217 			break;
2218 		default:
2219 			mmc->version = MMC_VERSION_1_2;
2220 			break;
2221 		}
2222 	}
2223 
2224 	/* divide frequency by 10, since the mults are 10x bigger */
2225 	freq = fbase[(cmd.response[0] & 0x7)];
2226 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2227 
2228 	mmc->legacy_speed = freq * mult;
2229 	mmc_select_mode(mmc, MMC_LEGACY);
2230 
2231 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2232 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2233 #if CONFIG_IS_ENABLED(MMC_WRITE)
2234 
2235 	if (IS_SD(mmc))
2236 		mmc->write_bl_len = mmc->read_bl_len;
2237 	else
2238 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2239 #endif
2240 
2241 	if (mmc->high_capacity) {
2242 		csize = (mmc->csd[1] & 0x3f) << 16
2243 			| (mmc->csd[2] & 0xffff0000) >> 16;
2244 		cmult = 8;
2245 	} else {
2246 		csize = (mmc->csd[1] & 0x3ff) << 2
2247 			| (mmc->csd[2] & 0xc0000000) >> 30;
2248 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2249 	}
2250 
2251 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2252 	mmc->capacity_user *= mmc->read_bl_len;
2253 	mmc->capacity_boot = 0;
2254 	mmc->capacity_rpmb = 0;
2255 	for (i = 0; i < 4; i++)
2256 		mmc->capacity_gp[i] = 0;
2257 
2258 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2259 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2260 
2261 #if CONFIG_IS_ENABLED(MMC_WRITE)
2262 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2263 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2264 #endif
2265 
2266 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2267 		cmd.cmdidx = MMC_CMD_SET_DSR;
2268 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2269 		cmd.resp_type = MMC_RSP_NONE;
2270 		if (mmc_send_cmd(mmc, &cmd, NULL))
2271 			pr_warn("MMC: SET_DSR failed\n");
2272 	}
2273 
2274 	/* Select the card, and put it into Transfer Mode */
2275 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2276 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2277 		cmd.resp_type = MMC_RSP_R1;
2278 		cmd.cmdarg = mmc->rca << 16;
2279 		err = mmc_send_cmd(mmc, &cmd, NULL);
2280 
2281 		if (err)
2282 			return err;
2283 	}
2284 
2285 	/*
2286 	 * For SD, its erase group is always one sector
2287 	 */
2288 #if CONFIG_IS_ENABLED(MMC_WRITE)
2289 	mmc->erase_grp_size = 1;
2290 #endif
2291 	mmc->part_config = MMCPART_NOAVAILABLE;
2292 
2293 	err = mmc_startup_v4(mmc);
2294 	if (err)
2295 		return err;
2296 
2297 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2298 	if (err)
2299 		return err;
2300 
2301 	if (IS_SD(mmc)) {
2302 		err = sd_get_capabilities(mmc);
2303 		if (err)
2304 			return err;
2305 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2306 	} else {
2307 		err = mmc_get_capabilities(mmc);
2308 		if (err)
2309 			return err;
2310 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2311 	}
2312 
2313 	if (err)
2314 		return err;
2315 
2316 	mmc->best_mode = mmc->selected_mode;
2317 
2318 	/* Fix the block length for DDR mode */
2319 	if (mmc->ddr_mode) {
2320 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2321 #if CONFIG_IS_ENABLED(MMC_WRITE)
2322 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2323 #endif
2324 	}
2325 
2326 	/* fill in device description */
2327 	bdesc = mmc_get_blk_desc(mmc);
2328 	bdesc->lun = 0;
2329 	bdesc->hwpart = 0;
2330 	bdesc->type = 0;
2331 	bdesc->blksz = mmc->read_bl_len;
2332 	bdesc->log2blksz = LOG2(bdesc->blksz);
2333 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2334 #if !defined(CONFIG_SPL_BUILD) || \
2335 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2336 		!defined(CONFIG_USE_TINY_PRINTF))
2337 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2338 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2339 		(mmc->cid[3] >> 16) & 0xffff);
2340 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2341 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2342 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2343 		(mmc->cid[2] >> 24) & 0xff);
2344 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2345 		(mmc->cid[2] >> 16) & 0xf);
2346 #else
2347 	bdesc->vendor[0] = 0;
2348 	bdesc->product[0] = 0;
2349 	bdesc->revision[0] = 0;
2350 #endif
2351 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2352 	part_init(bdesc);
2353 #endif
2354 
2355 	return 0;
2356 }
2357 
2358 static int mmc_send_if_cond(struct mmc *mmc)
2359 {
2360 	struct mmc_cmd cmd;
2361 	int err;
2362 
2363 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2364 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2365 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2366 	cmd.resp_type = MMC_RSP_R7;
2367 
2368 	err = mmc_send_cmd(mmc, &cmd, NULL);
2369 
2370 	if (err)
2371 		return err;
2372 
2373 	if ((cmd.response[0] & 0xff) != 0xaa)
2374 		return -EOPNOTSUPP;
2375 	else
2376 		mmc->version = SD_VERSION_2;
2377 
2378 	return 0;
2379 }
2380 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * board-specific MMC power initializations.
 *
 * Weak default is a no-op; legacy (non-driver-model) boards override
 * this to switch on card power before the controller is initialized.
 */
__weak void board_mmc_power_init(void)
{
}
#endif
2387 
/*
 * Locate the card's power supplies.
 *
 * With driver model, look up the "vmmc-supply" (card power) and
 * "vqmmc-supply" (I/O line power) regulators on the device; a missing
 * regulator is not an error — it is only reported at debug level, since
 * boards may have always-on supplies.  Without driver model, fall back
 * to the board_mmc_power_init() hook.
 *
 * Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	/* Card power supply (Vdd) */
	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		debug("%s: No vmmc supply\n", mmc->dev->name);

	/* I/O line supply (Vccq) */
	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2413 
2414 /*
2415  * put the host in the initial state:
2416  * - turn on Vdd (card power supply)
2417  * - configure the bus width and clock to minimal values
2418  */
2419 static void mmc_set_initial_state(struct mmc *mmc)
2420 {
2421 	int err;
2422 
2423 	/* First try to set 3.3V. If it fails set to 1.8V */
2424 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2425 	if (err != 0)
2426 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2427 	if (err != 0)
2428 		pr_warn("mmc: failed to set signal voltage\n");
2429 
2430 	mmc_select_mode(mmc, MMC_LEGACY);
2431 	mmc_set_bus_width(mmc, 1);
2432 	mmc_set_clock(mmc, 0, false);
2433 }
2434 
2435 static int mmc_power_on(struct mmc *mmc)
2436 {
2437 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2438 	if (mmc->vmmc_supply) {
2439 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2440 
2441 		if (ret) {
2442 			puts("Error enabling VMMC supply\n");
2443 			return ret;
2444 		}
2445 	}
2446 #endif
2447 	return 0;
2448 }
2449 
2450 static int mmc_power_off(struct mmc *mmc)
2451 {
2452 	mmc_set_clock(mmc, 1, true);
2453 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2454 	if (mmc->vmmc_supply) {
2455 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2456 
2457 		if (ret) {
2458 			debug("Error disabling VMMC supply\n");
2459 			return ret;
2460 		}
2461 	}
2462 #endif
2463 	return 0;
2464 }
2465 
/*
 * Power-cycle the card: off, a short settle delay, then back on.
 * Returns 0 on success or the first error encountered.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err;

	err = mmc_power_off(mmc);
	if (err)
		return err;

	/*
	 * The SD spec asks for at least 1ms with power removed; wait 2ms
	 * for extra margin before powering back up.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2480 
/*
 * First phase of card initialization: check for card presence, power
 * the card up, reset it and negotiate the operating conditions (SD
 * ACMD41 via sd_send_op_cond(), or MMC CMD1 via mmc_send_op_cond()).
 * The remainder of the bring-up is finished by mmc_complete_init().
 *
 * Returns 0 on success, -ENOMEDIUM when no card is present,
 * -EOPNOTSUPP when the card answers neither SD nor MMC voltage
 * selection, or another negative error code.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Nothing to do when the card was already brought up */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	/* Identification runs at minimal bus settings (1 bit, low clock) */
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2.  The result is deliberately discarded:
	 * failure only means the card is not SD v2, and err is overwritten
	 * by the operating-condition query below.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* Retry the whole identification once without UHS */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	/* Phase one done; mmc_complete_init() finishes the bring-up */
	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2588 
2589 static int mmc_complete_init(struct mmc *mmc)
2590 {
2591 	int err = 0;
2592 
2593 	mmc->init_in_progress = 0;
2594 	if (mmc->op_cond_pending)
2595 		err = mmc_complete_op_cond(mmc);
2596 
2597 	if (!err)
2598 		err = mmc_startup(mmc);
2599 	if (err)
2600 		mmc->has_init = 0;
2601 	else
2602 		mmc->has_init = 1;
2603 	return err;
2604 }
2605 
/*
 * Fully initialize a card: run (or resume) the first init phase and
 * then complete it.  A card that is already initialized is left alone.
 *
 * Returns 0 when the card is ready; on failure the error code and
 * elapsed time are printed and the error is returned.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* Link the uclass-private data back to this struct mmc */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	/* Skip phase one if mmc_start_init() already ran (e.g. preinit) */
	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2630 
/*
 * Save the user-supplied DSR (driver stage register) value on the
 * device for later use by the card setup code.  Always returns 0.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2636 
/*
 * CPU-specific MMC initializations.
 *
 * Weak default returns -1 ("not implemented"); SoC code overrides it
 * to register its controllers.
 */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2642 
/*
 * board-specific MMC initializations.
 *
 * Weak default returns -1 ("not implemented"), which makes mmc_probe()
 * fall back to cpu_mmc_init(); boards override it to register their
 * controllers.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2648 
/*
 * Flag a device for pre-initialization: mmc_do_preinit() then starts
 * its initialization early (see the preinit check there).
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2653 
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: devices are probed on first use; nothing to do */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: walk the MMC uclass and probe every device */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	/* NOTE(review): only -ENODEV ends this scan; other errors advance i */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	/* Probe failures are reported but do not abort the overall scan */
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy (non-DM): board hook first, CPU hook as fallback */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2697 
/*
 * One-time global MMC subsystem setup: initialize the legacy device
 * list (when used), probe the controllers, print the devices found and
 * run any requested pre-initializations.  Subsequent calls are no-ops.
 *
 * Returns 0 on success or the error from mmc_probe().
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	/* Kick off early init for devices flagged via mmc_set_preinit() */
	mmc_do_preinit();
	return 0;
}
2722 
2723 #ifdef CONFIG_CMD_BKOPS_ENABLE
2724 int mmc_set_bkops_enable(struct mmc *mmc)
2725 {
2726 	int err;
2727 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2728 
2729 	err = mmc_send_ext_csd(mmc, ext_csd);
2730 	if (err) {
2731 		puts("Could not get ext_csd register values\n");
2732 		return err;
2733 	}
2734 
2735 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2736 		puts("Background operations not supported on device\n");
2737 		return -EMEDIUMTYPE;
2738 	}
2739 
2740 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2741 		puts("Background operations already enabled\n");
2742 		return 0;
2743 	}
2744 
2745 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2746 	if (err) {
2747 		puts("Failed to enable manual background operations\n");
2748 		return err;
2749 	}
2750 
2751 	puts("Enabled manual background operations\n");
2752 
2753 	return 0;
2754 }
2755 #endif
2756