xref: /openbmc/u-boot/drivers/mmc/mmc.c (revision e23b19f4)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
25 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
26 static int mmc_power_cycle(struct mmc *mmc);
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
28 
#if CONFIG_IS_ENABLED(MMC_TINY)
/* Single static instance: the TINY build supports exactly one MMC device */
static struct mmc mmc_static;

/* Return the only MMC device; dev_num is ignored in the TINY build */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}

/* Kick off early init for the static device when preinit is requested */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* force preinit on so the eSDHC adapter can be identified early */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}

/* Return the block descriptor embedded in the mmc struct */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
#endif
51 
52 #if !CONFIG_IS_ENABLED(DM_MMC)
53 
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Non-DM fallback: waiting on the DAT0 line is not supported here;
 * callers treat -ENOSYS as "use a fixed delay instead".
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
#endif
60 
/* Board-level write-protect hook; -1 means "not implemented" */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
65 
66 int mmc_getwp(struct mmc *mmc)
67 {
68 	int wp;
69 
70 	wp = board_mmc_getwp(mmc);
71 
72 	if (wp < 0) {
73 		if (mmc->cfg->ops->getwp)
74 			wp = mmc->cfg->ops->getwp(mmc);
75 		else
76 			wp = 0;
77 	}
78 
79 	return wp;
80 }
81 
/* Board-level card-detect hook; -1 means "not implemented" */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
86 #endif
87 
#ifdef CONFIG_MMC_TRACE
/* Trace hook: log a command's index and argument before it is sent */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Trace hook: log the host error code, or decode the card's response
 * according to the expected response type.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* 136-bit response: print all four words ... */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* ... then dump each word byte-wise, MSB first */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Trace hook: log the card's current-state field (bits 12:9 of R1) */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif
155 
156 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
157 const char *mmc_mode_name(enum bus_mode mode)
158 {
159 	static const char *const names[] = {
160 	      [MMC_LEGACY]	= "MMC legacy",
161 	      [SD_LEGACY]	= "SD Legacy",
162 	      [MMC_HS]		= "MMC High Speed (26MHz)",
163 	      [SD_HS]		= "SD High Speed (50MHz)",
164 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
165 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
166 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
167 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
168 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
169 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
170 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
171 	      [MMC_HS_200]	= "HS200 (200MHz)",
172 	};
173 
174 	if (mode >= MMC_MODES_END)
175 		return "Unknown mode";
176 	else
177 		return names[mode];
178 }
179 #endif
180 
/*
 * Return the nominal bus clock frequency (Hz) for a bus mode.
 * MMC_LEGACY uses the per-card probed speed rather than a fixed value;
 * unknown modes return 0.
 */
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [SD_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
#ifdef MMC_SUPPORTS_TUNING
	      [UHS_SDR104]	= 208000000,
#endif
#endif
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	      [MMC_HS_200]	= 200000000,
#endif
	};

	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}
210 
211 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
212 {
213 	mmc->selected_mode = mode;
214 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
215 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
216 	debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
217 	      mmc->tran_speed / 1000000);
218 	return 0;
219 }
220 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Issue a command (and optional data transfer) via the host driver's
 * send_cmd op, with trace logging around the call when enabled.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif
233 
234 int mmc_send_status(struct mmc *mmc, int timeout)
235 {
236 	struct mmc_cmd cmd;
237 	int err, retries = 5;
238 
239 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
240 	cmd.resp_type = MMC_RSP_R1;
241 	if (!mmc_host_is_spi(mmc))
242 		cmd.cmdarg = mmc->rca << 16;
243 
244 	while (1) {
245 		err = mmc_send_cmd(mmc, &cmd, NULL);
246 		if (!err) {
247 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
248 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
249 			     MMC_STATE_PRG)
250 				break;
251 
252 			if (cmd.response[0] & MMC_STATUS_MASK) {
253 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
254 				pr_err("Status Error: 0x%08X\n",
255 				       cmd.response[0]);
256 #endif
257 				return -ECOMM;
258 			}
259 		} else if (--retries < 0)
260 			return err;
261 
262 		if (timeout-- <= 0)
263 			break;
264 
265 		udelay(1000);
266 	}
267 
268 	mmc_trace_state(mmc, &cmd);
269 	if (timeout <= 0) {
270 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
271 		pr_err("Timeout waiting card ready\n");
272 #endif
273 		return -ETIMEDOUT;
274 	}
275 
276 	return 0;
277 }
278 
/*
 * Set the card's block length with CMD16.  Skipped in DDR mode, where
 * the block length is fixed and the command is not permitted.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
310 
311 #ifdef MMC_SUPPORTS_TUNING
/* Expected tuning block contents for a 4-bit bus (CMD19/CMD21 reply) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Expected tuning block contents for an 8-bit bus */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
341 
/*
 * Execute one tuning command (@opcode): read a single tuning block and
 * compare it against the expected pattern for the current bus width.
 *
 * Returns 0 on a matching block, -EINVAL for an unsupported bus width,
 * -EIO on a pattern mismatch, or the transfer error.  @cmd_error is
 * not written by this implementation.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
379 #endif
380 
/*
 * Read @blkcnt blocks starting at @start into @dst (CMD17 for a single
 * block, CMD18 + STOP_TRANSMISSION for multiple).  Non-high-capacity
 * cards are byte-addressed, so the start address is scaled by the read
 * block length.  Returns the number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* a multiple-block read must be closed with CMD12 */
	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
421 
/*
 * Block-device read entry point: read @blkcnt blocks starting at
 * @start into @dst.  Selects the requested hardware partition, bounds-
 * checks the request and splits it into controller-sized chunks.
 * Returns the number of blocks read, or 0 on any failure.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* split into chunks no larger than the controller's b_max */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
478 
479 static int mmc_go_idle(struct mmc *mmc)
480 {
481 	struct mmc_cmd cmd;
482 	int err;
483 
484 	udelay(1000);
485 
486 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
487 	cmd.cmdarg = 0;
488 	cmd.resp_type = MMC_RSP_NONE;
489 
490 	err = mmc_send_cmd(mmc, &cmd, NULL);
491 
492 	if (err)
493 		return err;
494 
495 	udelay(2000);
496 
497 	return 0;
498 }
499 
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Switch the SD card signalling voltage with CMD11 and re-enable the
 * clock following the timing required by the switch sequence.
 * A request for 3.3V only changes the host regulator (no CMD11).
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, true);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif
561 
/*
 * Negotiate the SD card's operating conditions with ACMD41, polling
 * until the card leaves the busy state.  Sets the card version, OCR,
 * high-capacity flag and (when @uhs_en and the card accepts 1.8V
 * signalling) performs the UHS voltage switch.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* switch to 1.8V if the card accepted the S18R request */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
641 
642 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
643 {
644 	struct mmc_cmd cmd;
645 	int err;
646 
647 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
648 	cmd.resp_type = MMC_RSP_R3;
649 	cmd.cmdarg = 0;
650 	if (use_arg && !mmc_host_is_spi(mmc))
651 		cmd.cmdarg = OCR_HCS |
652 			(mmc->cfg->voltages &
653 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
654 			(mmc->ocr & OCR_ACCESS_MODE);
655 
656 	err = mmc_send_cmd(mmc, &cmd, NULL);
657 	if (err)
658 		return err;
659 	mmc->ocr = cmd.response[0];
660 	return 0;
661 }
662 
/*
 * Start the CMD1 (SEND_OP_COND) negotiation and mark it pending; it is
 * finished later by mmc_complete_op_cond() so several cards can be
 * initialized in parallel.
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Ask the card about its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
683 
684 static int mmc_complete_op_cond(struct mmc *mmc)
685 {
686 	struct mmc_cmd cmd;
687 	int timeout = 1000;
688 	uint start;
689 	int err;
690 
691 	mmc->op_cond_pending = 0;
692 	if (!(mmc->ocr & OCR_BUSY)) {
693 		/* Some cards seem to need this */
694 		mmc_go_idle(mmc);
695 
696 		start = get_timer(0);
697 		while (1) {
698 			err = mmc_send_op_cond_iter(mmc, 1);
699 			if (err)
700 				return err;
701 			if (mmc->ocr & OCR_BUSY)
702 				break;
703 			if (get_timer(start) > timeout)
704 				return -EOPNOTSUPP;
705 			udelay(100);
706 		}
707 	}
708 
709 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
710 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
711 		cmd.resp_type = MMC_RSP_R3;
712 		cmd.cmdarg = 0;
713 
714 		err = mmc_send_cmd(mmc, &cmd, NULL);
715 
716 		if (err)
717 			return err;
718 
719 		mmc->ocr = cmd.response[0];
720 	}
721 
722 	mmc->version = MMC_VERSION_UNKNOWN;
723 
724 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
725 	mmc->rca = 1;
726 
727 	return 0;
728 }
729 
730 
731 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
732 {
733 	struct mmc_cmd cmd;
734 	struct mmc_data data;
735 	int err;
736 
737 	/* Get the Card Status Register */
738 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
739 	cmd.resp_type = MMC_RSP_R1;
740 	cmd.cmdarg = 0;
741 
742 	data.dest = (char *)ext_csd;
743 	data.blocks = 1;
744 	data.blocksize = MMC_MAX_BLOCK_LEN;
745 	data.flags = MMC_DATA_READ;
746 
747 	err = mmc_send_cmd(mmc, &cmd, &data);
748 
749 	return err;
750 }
751 
752 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
753 {
754 	struct mmc_cmd cmd;
755 	int timeout = 1000;
756 	int retries = 3;
757 	int ret;
758 
759 	cmd.cmdidx = MMC_CMD_SWITCH;
760 	cmd.resp_type = MMC_RSP_R1b;
761 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
762 				 (index << 16) |
763 				 (value << 8);
764 
765 	while (retries > 0) {
766 		ret = mmc_send_cmd(mmc, &cmd, NULL);
767 
768 		/* Waiting for the ready status */
769 		if (!ret) {
770 			ret = mmc_send_status(mmc, timeout);
771 			return ret;
772 		}
773 
774 		retries--;
775 	}
776 
777 	return ret;
778 
779 }
780 
/*
 * Program the card's HS_TIMING field for the requested bus mode, then
 * (for the HS modes) read back EXT_CSD to confirm the switch took
 * effect.  Returns 0 on success, -EINVAL for unsupported modes,
 * -ENOTSUPP when the card did not take the high-speed timing, or the
 * underlying command error.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
823 
/*
 * Derive mmc->card_caps from the card version and the EXT_CSD
 * CARD_TYPE field.  Expects mmc->ext_csd to be populated for v4+
 * cards; SPI hosts and pre-v4 cards only get the legacy capability.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	/* low 6 bits of CARD_TYPE hold the supported timing classes */
	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
864 
865 static int mmc_set_capacity(struct mmc *mmc, int part_num)
866 {
867 	switch (part_num) {
868 	case 0:
869 		mmc->capacity = mmc->capacity_user;
870 		break;
871 	case 1:
872 	case 2:
873 		mmc->capacity = mmc->capacity_boot;
874 		break;
875 	case 3:
876 		mmc->capacity = mmc->capacity_rpmb;
877 		break;
878 	case 4:
879 	case 5:
880 	case 6:
881 	case 7:
882 		mmc->capacity = mmc->capacity_gp[part_num - 4];
883 		break;
884 	default:
885 		return -1;
886 	}
887 
888 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
889 
890 	return 0;
891 }
892 
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Verify that the current bus mode may be used with the requested
 * hardware partition (HS200 is forbidden outside the user area) and
 * re-negotiate mode/width when needed or when a better mode exists.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		debug("selected mode (%s) is forbidden for part %d\n",
		      mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
#else
/* Without HS200 support, no mode is restricted per partition */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
#endif
924 
/*
 * Select hardware partition @part_num by rewriting the PART_CONF
 * access bits, updating the capacity and descriptor on success.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
948 
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Validate and program an eMMC hardware partition layout.
 *
 * @conf describes the enhanced user area, the four general-purpose
 * partitions and the write-reliability settings.  @mode selects how
 * far to go: CHECK only validates, SET writes the configuration, and
 * COMPLETE additionally sets PARTITION_SETTING_COMPLETED (one-time,
 * takes effect after a power cycle).
 *
 * Returns 0 on success, -EINVAL for bad arguments or misaligned
 * sizes, -EMEDIUMTYPE when the card lacks a required capability,
 * -EPERM if the card is already partitioned, or a command error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed card: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a little-endian 24-bit field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
#endif
1144 
1145 #if !CONFIG_IS_ENABLED(DM_MMC)
1146 int mmc_getcd(struct mmc *mmc)
1147 {
1148 	int cd;
1149 
1150 	cd = board_mmc_getcd(mmc);
1151 
1152 	if (cd < 0) {
1153 		if (mmc->cfg->ops->getcd)
1154 			cd = mmc->cfg->ops->getcd(mmc);
1155 		else
1156 			cd = 1;
1157 	}
1158 
1159 	return cd;
1160 }
1161 #endif
1162 
1163 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1164 {
1165 	struct mmc_cmd cmd;
1166 	struct mmc_data data;
1167 
1168 	/* Switch the frequency */
1169 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1170 	cmd.resp_type = MMC_RSP_R1;
1171 	cmd.cmdarg = (mode << 31) | 0xffffff;
1172 	cmd.cmdarg &= ~(0xf << (group * 4));
1173 	cmd.cmdarg |= value << (group * 4);
1174 
1175 	data.dest = (char *)resp;
1176 	data.blocksize = 64;
1177 	data.blocks = 1;
1178 	data.flags = MMC_DATA_READ;
1179 
1180 	return mmc_send_cmd(mmc, &cmd, &data);
1181 }
1182 
1183 
/*
 * Query an SD card's capabilities: read the SCR to determine the spec
 * version and 4-bit support, then probe CMD6 in check mode for
 * high-speed (and, when enabled, UHS) bus modes. Results accumulate
 * in mmc->card_caps.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* baseline: every SD card can do 1-bit legacy transfers */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	/* SPI hosts cannot use CMD6/SCR based probing */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		/* the SCR read may fail transiently; retry a few times */
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field (SCR bits 59:56): 0 = 1.0, 1 = 1.10, 2 = 2.00+ */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit distinguishes 3.0 from 2.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed is supported, advertise the capability */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* low 5 bits of the group-1 support mask describe the UHS modes */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1296 
1297 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1298 {
1299 	int err;
1300 
1301 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1302 	int speed;
1303 
1304 	switch (mode) {
1305 	case SD_LEGACY:
1306 		speed = UHS_SDR12_BUS_SPEED;
1307 		break;
1308 	case SD_HS:
1309 		speed = HIGH_SPEED_BUS_SPEED;
1310 		break;
1311 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1312 	case UHS_SDR12:
1313 		speed = UHS_SDR12_BUS_SPEED;
1314 		break;
1315 	case UHS_SDR25:
1316 		speed = UHS_SDR25_BUS_SPEED;
1317 		break;
1318 	case UHS_SDR50:
1319 		speed = UHS_SDR50_BUS_SPEED;
1320 		break;
1321 	case UHS_DDR50:
1322 		speed = UHS_DDR50_BUS_SPEED;
1323 		break;
1324 	case UHS_SDR104:
1325 		speed = UHS_SDR104_BUS_SPEED;
1326 		break;
1327 #endif
1328 	default:
1329 		return -EINVAL;
1330 	}
1331 
1332 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1333 	if (err)
1334 		return err;
1335 
1336 	if ((__be32_to_cpu(switch_status[4]) >> 24) != speed)
1337 		return -ENOTSUPP;
1338 
1339 	return 0;
1340 }
1341 
1342 int sd_select_bus_width(struct mmc *mmc, int w)
1343 {
1344 	int err;
1345 	struct mmc_cmd cmd;
1346 
1347 	if ((w != 4) && (w != 1))
1348 		return -EINVAL;
1349 
1350 	cmd.cmdidx = MMC_CMD_APP_CMD;
1351 	cmd.resp_type = MMC_RSP_R1;
1352 	cmd.cmdarg = mmc->rca << 16;
1353 
1354 	err = mmc_send_cmd(mmc, &cmd, NULL);
1355 	if (err)
1356 		return err;
1357 
1358 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1359 	cmd.resp_type = MMC_RSP_R1;
1360 	if (w == 4)
1361 		cmd.cmdarg = 2;
1362 	else if (w == 1)
1363 		cmd.cmdarg = 0;
1364 	err = mmc_send_cmd(mmc, &cmd, NULL);
1365 	if (err)
1366 		return err;
1367 
1368 	return 0;
1369 }
1370 
1371 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status register (ACMD13) and derive the allocation
 * unit (AU) size plus the erase timeout/offset parameters, storing
 * them in mmc->ssr for later erase operations.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> allocation unit size, in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD prefix: CMD55 with the card's RCA */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* the read may fail transiently; retry a few times */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* the SD status block is big-endian on the wire */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE field; codes above 9 are only valid for SD 3.0 cards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE (es), ERASE_TIMEOUT (et), ERASE_OFFSET (eo) */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* timeout per AU in ms, plus fixed offset in ms */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1435 #endif
/*
 * Frequency bases for the CSD TRAN_SPEED field, pre-divided by 10 so
 * that the 10x multipliers below yield exact integer results without
 * floating point.
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1444 
/* Multiplier values for TRAN_SPEED, indexed by the 4-bit multiplier
 * field of the CSD. Multiplied by 10 to be nice to platforms without
 * floating point (pairs with the /10 bases in fbase[] above).
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1466 
1467 static inline int bus_width(uint cap)
1468 {
1469 	if (cap == MMC_MODE_8BIT)
1470 		return 8;
1471 	if (cap == MMC_MODE_4BIT)
1472 		return 4;
1473 	if (cap == MMC_MODE_1BIT)
1474 		return 1;
1475 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1476 	return 0;
1477 }
1478 
1479 #if !CONFIG_IS_ENABLED(DM_MMC)
1480 #ifdef MMC_SUPPORTS_TUNING
/* Tuning is not implemented for non-DM hosts; always fail */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1485 #endif
1486 
/* No-op: non-DM hosts provide no hook for the initialization stream */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1490 
1491 static int mmc_set_ios(struct mmc *mmc)
1492 {
1493 	int ret = 0;
1494 
1495 	if (mmc->cfg->ops->set_ios)
1496 		ret = mmc->cfg->ops->set_ios(mmc);
1497 
1498 	return ret;
1499 }
1500 #endif
1501 
1502 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1503 {
1504 	if (!disable) {
1505 		if (clock > mmc->cfg->f_max)
1506 			clock = mmc->cfg->f_max;
1507 
1508 		if (clock < mmc->cfg->f_min)
1509 			clock = mmc->cfg->f_min;
1510 	}
1511 
1512 	mmc->clock = clock;
1513 	mmc->clk_disable = disable;
1514 
1515 	return mmc_set_ios(mmc);
1516 }
1517 
/* Record the new bus width and apply it via the host driver */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1524 
1525 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1526 /*
1527  * helper function to display the capabilities in a human
1528  * friendly manner. The capabilities include bus width and
1529  * supported modes.
1530  */
1531 void mmc_dump_capabilities(const char *text, uint caps)
1532 {
1533 	enum bus_mode mode;
1534 
1535 	printf("%s: widths [", text);
1536 	if (caps & MMC_MODE_8BIT)
1537 		printf("8, ");
1538 	if (caps & MMC_MODE_4BIT)
1539 		printf("4, ");
1540 	if (caps & MMC_MODE_1BIT)
1541 		printf("1, ");
1542 	printf("\b\b] modes [");
1543 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1544 		if (MMC_CAP(mode) & caps)
1545 			printf("%s, ", mmc_mode_name(mode));
1546 	printf("\b\b]\n");
1547 }
1548 #endif
1549 
/* One entry of a bus-mode preference table */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus mode to try */
	uint widths;		/* OR-mask of usable MMC_MODE_xBIT widths */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning command; 0 = no tuning required */
#endif
};
1557 
1558 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1559 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1560 {
1561 	switch (voltage) {
1562 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1563 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1564 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1565 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1566 	}
1567 	return -EINVAL;
1568 }
1569 
1570 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1571 {
1572 	int err;
1573 
1574 	if (mmc->signal_voltage == signal_voltage)
1575 		return 0;
1576 
1577 	mmc->signal_voltage = signal_voltage;
1578 	err = mmc_set_ios(mmc);
1579 	if (err)
1580 		debug("unable to set voltage (err %d)\n", err);
1581 
1582 	return err;
1583 }
1584 #else
/* I/O voltage switching not compiled in: report success unconditionally */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1589 #endif
1590 
/*
 * SD bus modes in decreasing order of preference; the selection loop
 * in sd_select_mode_and_width() keeps the first entry that both card
 * and host support.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1628 
/* Walk sd_modes_by_pref, visiting only entries whose mode is in @caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1634 
1635 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1636 {
1637 	int err;
1638 	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1639 	const struct mode_width_tuning *mwt;
1640 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1641 	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1642 #else
1643 	bool uhs_en = false;
1644 #endif
1645 	uint caps;
1646 
1647 #ifdef DEBUG
1648 	mmc_dump_capabilities("sd card", card_caps);
1649 	mmc_dump_capabilities("host", mmc->host_caps);
1650 #endif
1651 
1652 	/* Restrict card's capabilities by what the host can do */
1653 	caps = card_caps & mmc->host_caps;
1654 
1655 	if (!uhs_en)
1656 		caps &= ~UHS_CAPS;
1657 
1658 	for_each_sd_mode_by_pref(caps, mwt) {
1659 		uint *w;
1660 
1661 		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1662 			if (*w & caps & mwt->widths) {
1663 				debug("trying mode %s width %d (at %d MHz)\n",
1664 				      mmc_mode_name(mwt->mode),
1665 				      bus_width(*w),
1666 				      mmc_mode2freq(mmc, mwt->mode) / 1000000);
1667 
1668 				/* configure the bus width (card + host) */
1669 				err = sd_select_bus_width(mmc, bus_width(*w));
1670 				if (err)
1671 					goto error;
1672 				mmc_set_bus_width(mmc, bus_width(*w));
1673 
1674 				/* configure the bus mode (card) */
1675 				err = sd_set_card_speed(mmc, mwt->mode);
1676 				if (err)
1677 					goto error;
1678 
1679 				/* configure the bus mode (host) */
1680 				mmc_select_mode(mmc, mwt->mode);
1681 				mmc_set_clock(mmc, mmc->tran_speed, false);
1682 
1683 #ifdef MMC_SUPPORTS_TUNING
1684 				/* execute tuning if needed */
1685 				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1686 					err = mmc_execute_tuning(mmc,
1687 								 mwt->tuning);
1688 					if (err) {
1689 						debug("tuning failed\n");
1690 						goto error;
1691 					}
1692 				}
1693 #endif
1694 
1695 #if CONFIG_IS_ENABLED(MMC_WRITE)
1696 				err = sd_read_ssr(mmc);
1697 				if (!err)
1698 					pr_warn("unable to read ssr\n");
1699 #endif
1700 				if (!err)
1701 					return 0;
1702 
1703 error:
1704 				/* revert to a safer bus speed */
1705 				mmc_select_mode(mmc, SD_LEGACY);
1706 				mmc_set_clock(mmc, mmc->tran_speed, false);
1707 			}
1708 		}
1709 	}
1710 
1711 	printf("unable to select a mode\n");
1712 	return -ENOTSUPP;
1713 }
1714 
1715 /*
1716  * read the compare the part of ext csd that is constant.
1717  * This can be used to check that the transfer is working
1718  * as expected.
1719  */
1720 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1721 {
1722 	int err;
1723 	const u8 *ext_csd = mmc->ext_csd;
1724 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1725 
1726 	if (mmc->version < MMC_VERSION_4)
1727 		return 0;
1728 
1729 	err = mmc_send_ext_csd(mmc, test_csd);
1730 	if (err)
1731 		return err;
1732 
1733 	/* Only compare read only fields */
1734 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1735 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1736 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1737 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1738 	    ext_csd[EXT_CSD_REV]
1739 		== test_csd[EXT_CSD_REV] &&
1740 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1741 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1742 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1743 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1744 		return 0;
1745 
1746 	return -EBADMSG;
1747 }
1748 
1749 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Build the set of signalling voltages the card accepts for @mode
 * (from its EXT_CSD card type bits), intersect with @allowed_mask,
 * and try candidates until one can be applied.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		/* all other modes run on 3.3V signalling */
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		/*
		 * picks the lowest set bit; assumes lower bits encode
		 * lower voltages — TODO confirm against the enum layout
		 */
		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc,  best_match))
			return 0;

		/* that candidate failed: drop it and try the next one */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1786 #else
/* I/O voltage switching not compiled in: report success unconditionally */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1792 #endif
1793 
/*
 * eMMC bus modes in decreasing order of preference; the selection
 * loop in mmc_select_mode_and_width() keeps the first entry that both
 * card and host support.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1819 
/* Walk mmc_modes_by_pref, visiting only entries whose mode is in @caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))

/*
 * Width capability + DDR flag -> EXT_CSD BUS_WIDTH programming value,
 * listed widest first so wider buses are preferred.
 */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability bit */
	bool is_ddr;		/* entry applies to DDR modes only? */
	uint ext_csd_bits;	/* value for the EXT_CSD_BUS_WIDTH field */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};

/* Iterate ext_csd_bus_width entries matching @caps and the @ddr flag */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1843 
/*
 * Select the best eMMC bus mode and width supported by both card and
 * host: modes from mmc_modes_by_pref (best first), widths widest
 * first. On any failure the card is reverted to 1-bit legacy mode and
 * the next combination is tried.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start out at the card's legacy rate before reprogramming */
	mmc_set_clock(mmc, mmc->legacy_speed, false);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so it can be restored on error */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1941 
1942 static int mmc_startup_v4(struct mmc *mmc)
1943 {
1944 	int err, i;
1945 	u64 capacity;
1946 	bool has_parts = false;
1947 	bool part_completed;
1948 	static const u32 mmc_versions[] = {
1949 		MMC_VERSION_4,
1950 		MMC_VERSION_4_1,
1951 		MMC_VERSION_4_2,
1952 		MMC_VERSION_4_3,
1953 		MMC_VERSION_4_41,
1954 		MMC_VERSION_4_5,
1955 		MMC_VERSION_5_0,
1956 		MMC_VERSION_5_1
1957 	};
1958 
1959 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1960 
1961 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1962 		return 0;
1963 
1964 	/* check  ext_csd version and capacity */
1965 	err = mmc_send_ext_csd(mmc, ext_csd);
1966 	if (err)
1967 		goto error;
1968 
1969 	/* store the ext csd for future reference */
1970 	if (!mmc->ext_csd)
1971 		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1972 	if (!mmc->ext_csd)
1973 		return -ENOMEM;
1974 	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
1975 
1976 	if (ext_csd[EXT_CSD_REV] > ARRAY_SIZE(mmc_versions))
1977 		return -EINVAL;
1978 
1979 	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1980 
1981 	if (mmc->version >= MMC_VERSION_4_2) {
1982 		/*
1983 		 * According to the JEDEC Standard, the value of
1984 		 * ext_csd's capacity is valid if the value is more
1985 		 * than 2GB
1986 		 */
1987 		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1988 				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1989 				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1990 				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1991 		capacity *= MMC_MAX_BLOCK_LEN;
1992 		if ((capacity >> 20) > 2 * 1024)
1993 			mmc->capacity_user = capacity;
1994 	}
1995 
1996 	/* The partition data may be non-zero but it is only
1997 	 * effective if PARTITION_SETTING_COMPLETED is set in
1998 	 * EXT_CSD, so ignore any data if this bit is not set,
1999 	 * except for enabling the high-capacity group size
2000 	 * definition (see below).
2001 	 */
2002 	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2003 			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2004 
2005 	/* store the partition info of emmc */
2006 	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2007 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2008 	    ext_csd[EXT_CSD_BOOT_MULT])
2009 		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2010 	if (part_completed &&
2011 	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2012 		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2013 
2014 	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2015 
2016 	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2017 
2018 	for (i = 0; i < 4; i++) {
2019 		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2020 		uint mult = (ext_csd[idx + 2] << 16) +
2021 			(ext_csd[idx + 1] << 8) + ext_csd[idx];
2022 		if (mult)
2023 			has_parts = true;
2024 		if (!part_completed)
2025 			continue;
2026 		mmc->capacity_gp[i] = mult;
2027 		mmc->capacity_gp[i] *=
2028 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2029 		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2030 		mmc->capacity_gp[i] <<= 19;
2031 	}
2032 
2033 #ifndef CONFIG_SPL_BUILD
2034 	if (part_completed) {
2035 		mmc->enh_user_size =
2036 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2037 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2038 			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2039 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2040 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2041 		mmc->enh_user_size <<= 19;
2042 		mmc->enh_user_start =
2043 			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2044 			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2045 			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2046 			ext_csd[EXT_CSD_ENH_START_ADDR];
2047 		if (mmc->high_capacity)
2048 			mmc->enh_user_start <<= 9;
2049 	}
2050 #endif
2051 
2052 	/*
2053 	 * Host needs to enable ERASE_GRP_DEF bit if device is
2054 	 * partitioned. This bit will be lost every time after a reset
2055 	 * or power off. This will affect erase size.
2056 	 */
2057 	if (part_completed)
2058 		has_parts = true;
2059 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2060 	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2061 		has_parts = true;
2062 	if (has_parts) {
2063 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2064 				 EXT_CSD_ERASE_GROUP_DEF, 1);
2065 
2066 		if (err)
2067 			goto error;
2068 
2069 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2070 	}
2071 
2072 	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2073 #if CONFIG_IS_ENABLED(MMC_WRITE)
2074 		/* Read out group size from ext_csd */
2075 		mmc->erase_grp_size =
2076 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2077 #endif
2078 		/*
2079 		 * if high capacity and partition setting completed
2080 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2081 		 * JEDEC Standard JESD84-B45, 6.2.4
2082 		 */
2083 		if (mmc->high_capacity && part_completed) {
2084 			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2085 				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2086 				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2087 				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2088 			capacity *= MMC_MAX_BLOCK_LEN;
2089 			mmc->capacity_user = capacity;
2090 		}
2091 	}
2092 #if CONFIG_IS_ENABLED(MMC_WRITE)
2093 	else {
2094 		/* Calculate the group size from the csd value. */
2095 		int erase_gsz, erase_gmul;
2096 
2097 		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2098 		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2099 		mmc->erase_grp_size = (erase_gsz + 1)
2100 			* (erase_gmul + 1);
2101 	}
2102 #endif
2103 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2104 	mmc->hc_wp_grp_size = 1024
2105 		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2106 		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2107 #endif
2108 
2109 	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2110 
2111 	return 0;
2112 error:
2113 	if (mmc->ext_csd) {
2114 		free(mmc->ext_csd);
2115 		mmc->ext_csd = NULL;
2116 	}
2117 	return err;
2118 }
2119 
2120 static int mmc_startup(struct mmc *mmc)
2121 {
2122 	int err, i;
2123 	uint mult, freq;
2124 	u64 cmult, csize;
2125 	struct mmc_cmd cmd;
2126 	struct blk_desc *bdesc;
2127 
2128 #ifdef CONFIG_MMC_SPI_CRC_ON
2129 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2130 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2131 		cmd.resp_type = MMC_RSP_R1;
2132 		cmd.cmdarg = 1;
2133 		err = mmc_send_cmd(mmc, &cmd, NULL);
2134 		if (err)
2135 			return err;
2136 	}
2137 #endif
2138 
2139 	/* Put the Card in Identify Mode */
2140 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2141 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2142 	cmd.resp_type = MMC_RSP_R2;
2143 	cmd.cmdarg = 0;
2144 
2145 	err = mmc_send_cmd(mmc, &cmd, NULL);
2146 
2147 #ifdef CONFIG_MMC_QUIRKS
2148 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2149 		int retries = 4;
2150 		/*
2151 		 * It has been seen that SEND_CID may fail on the first
2152 		 * attempt, let's try a few more time
2153 		 */
2154 		do {
2155 			err = mmc_send_cmd(mmc, &cmd, NULL);
2156 			if (!err)
2157 				break;
2158 		} while (retries--);
2159 	}
2160 #endif
2161 
2162 	if (err)
2163 		return err;
2164 
2165 	memcpy(mmc->cid, cmd.response, 16);
2166 
2167 	/*
2168 	 * For MMC cards, set the Relative Address.
2169 	 * For SD cards, get the Relatvie Address.
2170 	 * This also puts the cards into Standby State
2171 	 */
2172 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2173 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2174 		cmd.cmdarg = mmc->rca << 16;
2175 		cmd.resp_type = MMC_RSP_R6;
2176 
2177 		err = mmc_send_cmd(mmc, &cmd, NULL);
2178 
2179 		if (err)
2180 			return err;
2181 
2182 		if (IS_SD(mmc))
2183 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2184 	}
2185 
2186 	/* Get the Card-Specific Data */
2187 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2188 	cmd.resp_type = MMC_RSP_R2;
2189 	cmd.cmdarg = mmc->rca << 16;
2190 
2191 	err = mmc_send_cmd(mmc, &cmd, NULL);
2192 
2193 	if (err)
2194 		return err;
2195 
2196 	mmc->csd[0] = cmd.response[0];
2197 	mmc->csd[1] = cmd.response[1];
2198 	mmc->csd[2] = cmd.response[2];
2199 	mmc->csd[3] = cmd.response[3];
2200 
2201 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2202 		int version = (cmd.response[0] >> 26) & 0xf;
2203 
2204 		switch (version) {
2205 		case 0:
2206 			mmc->version = MMC_VERSION_1_2;
2207 			break;
2208 		case 1:
2209 			mmc->version = MMC_VERSION_1_4;
2210 			break;
2211 		case 2:
2212 			mmc->version = MMC_VERSION_2_2;
2213 			break;
2214 		case 3:
2215 			mmc->version = MMC_VERSION_3;
2216 			break;
2217 		case 4:
2218 			mmc->version = MMC_VERSION_4;
2219 			break;
2220 		default:
2221 			mmc->version = MMC_VERSION_1_2;
2222 			break;
2223 		}
2224 	}
2225 
2226 	/* divide frequency by 10, since the mults are 10x bigger */
2227 	freq = fbase[(cmd.response[0] & 0x7)];
2228 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2229 
2230 	mmc->legacy_speed = freq * mult;
2231 	mmc_select_mode(mmc, MMC_LEGACY);
2232 
2233 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2234 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2235 #if CONFIG_IS_ENABLED(MMC_WRITE)
2236 
2237 	if (IS_SD(mmc))
2238 		mmc->write_bl_len = mmc->read_bl_len;
2239 	else
2240 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2241 #endif
2242 
2243 	if (mmc->high_capacity) {
2244 		csize = (mmc->csd[1] & 0x3f) << 16
2245 			| (mmc->csd[2] & 0xffff0000) >> 16;
2246 		cmult = 8;
2247 	} else {
2248 		csize = (mmc->csd[1] & 0x3ff) << 2
2249 			| (mmc->csd[2] & 0xc0000000) >> 30;
2250 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2251 	}
2252 
2253 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2254 	mmc->capacity_user *= mmc->read_bl_len;
2255 	mmc->capacity_boot = 0;
2256 	mmc->capacity_rpmb = 0;
2257 	for (i = 0; i < 4; i++)
2258 		mmc->capacity_gp[i] = 0;
2259 
2260 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2261 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2262 
2263 #if CONFIG_IS_ENABLED(MMC_WRITE)
2264 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2265 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2266 #endif
2267 
2268 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2269 		cmd.cmdidx = MMC_CMD_SET_DSR;
2270 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2271 		cmd.resp_type = MMC_RSP_NONE;
2272 		if (mmc_send_cmd(mmc, &cmd, NULL))
2273 			pr_warn("MMC: SET_DSR failed\n");
2274 	}
2275 
2276 	/* Select the card, and put it into Transfer Mode */
2277 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2278 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2279 		cmd.resp_type = MMC_RSP_R1;
2280 		cmd.cmdarg = mmc->rca << 16;
2281 		err = mmc_send_cmd(mmc, &cmd, NULL);
2282 
2283 		if (err)
2284 			return err;
2285 	}
2286 
2287 	/*
2288 	 * For SD, its erase group is always one sector
2289 	 */
2290 #if CONFIG_IS_ENABLED(MMC_WRITE)
2291 	mmc->erase_grp_size = 1;
2292 #endif
2293 	mmc->part_config = MMCPART_NOAVAILABLE;
2294 
2295 	err = mmc_startup_v4(mmc);
2296 	if (err)
2297 		return err;
2298 
2299 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2300 	if (err)
2301 		return err;
2302 
2303 	if (IS_SD(mmc)) {
2304 		err = sd_get_capabilities(mmc);
2305 		if (err)
2306 			return err;
2307 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2308 	} else {
2309 		err = mmc_get_capabilities(mmc);
2310 		if (err)
2311 			return err;
2312 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2313 	}
2314 
2315 	if (err)
2316 		return err;
2317 
2318 	mmc->best_mode = mmc->selected_mode;
2319 
2320 	/* Fix the block length for DDR mode */
2321 	if (mmc->ddr_mode) {
2322 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2323 #if CONFIG_IS_ENABLED(MMC_WRITE)
2324 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2325 #endif
2326 	}
2327 
2328 	/* fill in device description */
2329 	bdesc = mmc_get_blk_desc(mmc);
2330 	bdesc->lun = 0;
2331 	bdesc->hwpart = 0;
2332 	bdesc->type = 0;
2333 	bdesc->blksz = mmc->read_bl_len;
2334 	bdesc->log2blksz = LOG2(bdesc->blksz);
2335 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2336 #if !defined(CONFIG_SPL_BUILD) || \
2337 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2338 		!defined(CONFIG_USE_TINY_PRINTF))
2339 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2340 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2341 		(mmc->cid[3] >> 16) & 0xffff);
2342 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2343 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2344 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2345 		(mmc->cid[2] >> 24) & 0xff);
2346 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2347 		(mmc->cid[2] >> 16) & 0xf);
2348 #else
2349 	bdesc->vendor[0] = 0;
2350 	bdesc->product[0] = 0;
2351 	bdesc->revision[0] = 0;
2352 #endif
2353 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2354 	part_init(bdesc);
2355 #endif
2356 
2357 	return 0;
2358 }
2359 
2360 static int mmc_send_if_cond(struct mmc *mmc)
2361 {
2362 	struct mmc_cmd cmd;
2363 	int err;
2364 
2365 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2366 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2367 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2368 	cmd.resp_type = MMC_RSP_R7;
2369 
2370 	err = mmc_send_cmd(mmc, &cmd, NULL);
2371 
2372 	if (err)
2373 		return err;
2374 
2375 	if ((cmd.response[0] & 0xff) != 0xaa)
2376 		return -EOPNOTSUPP;
2377 	else
2378 		mmc->version = SD_VERSION_2;
2379 
2380 	return 0;
2381 }
2382 
2383 #if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
	/* Default is a no-op; boards may override this weak symbol. */
}
2388 #endif
2389 
/*
 * Resolve how card power is controlled for this host.
 *
 * With driver model and regulator support, look up the optional
 * "vmmc-supply" (card power) and "vqmmc-supply" (I/O voltage)
 * regulators from the device tree; a missing regulator is only
 * reported at debug level, not treated as an error.  Without driver
 * model, fall back to the board hook.  Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2415 
2416 /*
2417  * put the host in the initial state:
2418  * - turn on Vdd (card power supply)
2419  * - configure the bus width and clock to minimal values
2420  */
2421 static void mmc_set_initial_state(struct mmc *mmc)
2422 {
2423 	int err;
2424 
2425 	/* First try to set 3.3V. If it fails set to 1.8V */
2426 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2427 	if (err != 0)
2428 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2429 	if (err != 0)
2430 		pr_warn("mmc: failed to set signal voltage\n");
2431 
2432 	mmc_select_mode(mmc, MMC_LEGACY);
2433 	mmc_set_bus_width(mmc, 1);
2434 	mmc_set_clock(mmc, 0, false);
2435 }
2436 
2437 static int mmc_power_on(struct mmc *mmc)
2438 {
2439 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2440 	if (mmc->vmmc_supply) {
2441 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2442 
2443 		if (ret) {
2444 			puts("Error enabling VMMC supply\n");
2445 			return ret;
2446 		}
2447 	}
2448 #endif
2449 	return 0;
2450 }
2451 
2452 static int mmc_power_off(struct mmc *mmc)
2453 {
2454 	mmc_set_clock(mmc, 0, true);
2455 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2456 	if (mmc->vmmc_supply) {
2457 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2458 
2459 		if (ret) {
2460 			debug("Error disabling VMMC supply\n");
2461 			return ret;
2462 		}
2463 	}
2464 #endif
2465 	return 0;
2466 }
2467 
/*
 * Power cycle the card: power off, wait, power back on.
 *
 * Returns 0 on success, or a negative error when either power
 * transition fails (e.g. the supply cannot be controlled).
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err = mmc_power_off(mmc);

	if (err)
		return err;
	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}
2482 
/*
 * Begin card initialization: detect card presence, power up the bus,
 * reset the card (CMD0) and issue the SD/MMC operating-condition
 * handshake.  On success init_in_progress is set; the sequence is
 * finished later by mmc_complete_init() (via mmc_init()).
 *
 * Returns 0 on success, -ENOMEDIUM when no card is present,
 * -EOPNOTSUPP when the card answers neither SD nor MMC op-cond,
 * or another negative error from the underlying steps.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

#if !defined(CONFIG_MMC_BROKEN_CD)
	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#else
	/* broken card-detect: always assume a card is present */
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Already fully initialized: nothing to do */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	/* Re-entered (goto) after a failed UHS attempt, with uhs_en off */
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	/*
	 * Result deliberately overwritten below: a CMD8 failure only
	 * means the card is not SD v2; the mmc->version side effect of
	 * mmc_send_if_cond() is what matters here.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed: retry once in non-UHS mode */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2594 
2595 static int mmc_complete_init(struct mmc *mmc)
2596 {
2597 	int err = 0;
2598 
2599 	mmc->init_in_progress = 0;
2600 	if (mmc->op_cond_pending)
2601 		err = mmc_complete_op_cond(mmc);
2602 
2603 	if (!err)
2604 		err = mmc_startup(mmc);
2605 	if (err)
2606 		mmc->has_init = 0;
2607 	else
2608 		mmc->has_init = 1;
2609 	return err;
2610 }
2611 
/*
 * Fully initialize an MMC/SD device: start (or resume) the init
 * sequence and complete it.  Safe to call repeatedly; returns 0
 * immediately once the card is already initialized, otherwise a
 * negative error from the init steps.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* Record this struct mmc in the uclass private data */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	/* Timestamp used only by the failure-path printf below */
	start = get_timer(0);

	/* mmc_start_init() may already have run, e.g. via preinit */
	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2636 
/* Latch a driver stage register (DSR) value for the card; always 0. */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2642 
2643 /* CPU-specific MMC initializations */
2644 __weak int cpu_mmc_init(bd_t *bis)
2645 {
2646 	return -1;
2647 }
2648 
/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	/* Default: no board-level controllers; boards may override. */
	return -1;
}
2654 
/* Mark a device for early init during mmc_do_preinit() when nonzero. */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2659 
#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: devices are probed on demand; nothing to do. */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: enumerate and probe every device in the MMC uclass. */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		/*
		 * Errors other than -ENODEV are deliberately ignored here;
		 * per-device failures are reported by the probe loop below.
		 */
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	/* Probe failures are logged but do not abort initialization */
	return 0;
}
#else
/* Legacy (non-DM): board hook first, CPU hook as a fallback. */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2703 
2704 int mmc_initialize(bd_t *bis)
2705 {
2706 	static int initialized = 0;
2707 	int ret;
2708 	if (initialized)	/* Avoid initializing mmc multiple times */
2709 		return 0;
2710 	initialized = 1;
2711 
2712 #if !CONFIG_IS_ENABLED(BLK)
2713 #if !CONFIG_IS_ENABLED(MMC_TINY)
2714 	mmc_list_init();
2715 #endif
2716 #endif
2717 	ret = mmc_probe(bis);
2718 	if (ret)
2719 		return ret;
2720 
2721 #ifndef CONFIG_SPL_BUILD
2722 	print_mmc_devices(',');
2723 #endif
2724 
2725 	mmc_do_preinit();
2726 	return 0;
2727 }
2728 
2729 #ifdef CONFIG_CMD_BKOPS_ENABLE
2730 int mmc_set_bkops_enable(struct mmc *mmc)
2731 {
2732 	int err;
2733 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2734 
2735 	err = mmc_send_ext_csd(mmc, ext_csd);
2736 	if (err) {
2737 		puts("Could not get ext_csd register values\n");
2738 		return err;
2739 	}
2740 
2741 	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2742 		puts("Background operations not supported on device\n");
2743 		return -EMEDIUMTYPE;
2744 	}
2745 
2746 	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2747 		puts("Background operations already enabled\n");
2748 		return 0;
2749 	}
2750 
2751 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2752 	if (err) {
2753 		puts("Failed to enable manual background operations\n");
2754 		return err;
2755 	}
2756 
2757 	puts("Enabled manual background operations\n");
2758 
2759 	return 0;
2760 }
2761 #endif
2762