xref: /openbmc/u-boot/drivers/mmc/mmc.c (revision 53deb24d)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, Freescale Semiconductor, Inc
4  * Andy Fleming
5  *
6  * Based vaguely on the Linux code
7  */
8 
9 #include <config.h>
10 #include <common.h>
11 #include <command.h>
12 #include <dm.h>
13 #include <dm/device-internal.h>
14 #include <errno.h>
15 #include <mmc.h>
16 #include <part.h>
17 #include <power/regulator.h>
18 #include <malloc.h>
19 #include <memalign.h>
20 #include <linux/list.h>
21 #include <div64.h>
22 #include "mmc_private.h"
23 
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
27 
28 #if CONFIG_IS_ENABLED(MMC_TINY)
29 static struct mmc mmc_static;
/* With MMC_TINY there is exactly one (static) MMC device; dev_num is ignored */
struct mmc *find_mmc_device(int dev_num)
{
	return &mmc_static;
}
34 
/* Kick off early init of the single MMC_TINY device when preinit is set */
void mmc_do_preinit(void)
{
	struct mmc *m = &mmc_static;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	/* FSL eSDHC adapter identification needs the controller initialized early */
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);
}
44 
/* MMC_TINY keeps the block descriptor embedded directly in struct mmc */
struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
{
	return &mmc->block_dev;
}
49 #endif
50 
51 #if !CONFIG_IS_ENABLED(DM_MMC)
52 
53 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/* Non-DM stub: waiting on the DAT0 line is not supported without DM_MMC */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
58 #endif
59 
/* Board hook for write-protect state; -1 means "not implemented by board" */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
64 
65 int mmc_getwp(struct mmc *mmc)
66 {
67 	int wp;
68 
69 	wp = board_mmc_getwp(mmc);
70 
71 	if (wp < 0) {
72 		if (mmc->cfg->ops->getwp)
73 			wp = mmc->cfg->ops->getwp(mmc);
74 		else
75 			wp = 0;
76 	}
77 
78 	return wp;
79 }
80 
/* Board hook for card-detect state; -1 means "not implemented by board" */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
85 #endif
86 
87 #ifdef CONFIG_MMC_TRACE
/* Trace helper: log the command index and argument before it is sent */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}
93 
/*
 * Trace helper: log the outcome of a command.  On failure only the error
 * code is printed; on success the response words are decoded according
 * to the response type.  R2 responses additionally get a byte-wise hex
 * dump of all four response words.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				/* dump each response word highest-address byte first */
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
145 
146 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
147 {
148 	int status;
149 
150 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
151 	printf("CURR STATE:%d\n", status);
152 }
153 #endif
154 
155 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
156 const char *mmc_mode_name(enum bus_mode mode)
157 {
158 	static const char *const names[] = {
159 	      [MMC_LEGACY]	= "MMC legacy",
160 	      [SD_LEGACY]	= "SD Legacy",
161 	      [MMC_HS]		= "MMC High Speed (26MHz)",
162 	      [SD_HS]		= "SD High Speed (50MHz)",
163 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
164 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
165 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
166 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
167 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
168 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
169 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
170 	      [MMC_HS_200]	= "HS200 (200MHz)",
171 	};
172 
173 	if (mode >= MMC_MODES_END)
174 		return "Unknown mode";
175 	else
176 		return names[mode];
177 }
178 #endif
179 
180 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
181 {
182 	static const int freqs[] = {
183 	      [MMC_LEGACY]	= 25000000,
184 	      [SD_LEGACY]	= 25000000,
185 	      [MMC_HS]		= 26000000,
186 	      [SD_HS]		= 50000000,
187 	      [MMC_HS_52]	= 52000000,
188 	      [MMC_DDR_52]	= 52000000,
189 	      [UHS_SDR12]	= 25000000,
190 	      [UHS_SDR25]	= 50000000,
191 	      [UHS_SDR50]	= 100000000,
192 	      [UHS_DDR50]	= 50000000,
193 	      [UHS_SDR104]	= 208000000,
194 	      [MMC_HS_200]	= 200000000,
195 	};
196 
197 	if (mode == MMC_LEGACY)
198 		return mmc->legacy_speed;
199 	else if (mode >= MMC_MODES_END)
200 		return 0;
201 	else
202 		return freqs[mode];
203 }
204 
/*
 * Record the chosen bus mode on the device and cache the corresponding
 * transfer frequency and DDR flag.  Always returns 0.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
		 mmc->tran_speed / 1000000);
	return 0;
}
214 
215 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM path: forward a command to the host driver, with optional tracing */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
226 #endif
227 
228 int mmc_send_status(struct mmc *mmc, int timeout)
229 {
230 	struct mmc_cmd cmd;
231 	int err, retries = 5;
232 
233 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
234 	cmd.resp_type = MMC_RSP_R1;
235 	if (!mmc_host_is_spi(mmc))
236 		cmd.cmdarg = mmc->rca << 16;
237 
238 	while (1) {
239 		err = mmc_send_cmd(mmc, &cmd, NULL);
240 		if (!err) {
241 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
242 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
243 			     MMC_STATE_PRG)
244 				break;
245 
246 			if (cmd.response[0] & MMC_STATUS_MASK) {
247 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
248 				pr_err("Status Error: 0x%08X\n",
249 				       cmd.response[0]);
250 #endif
251 				return -ECOMM;
252 			}
253 		} else if (--retries < 0)
254 			return err;
255 
256 		if (timeout-- <= 0)
257 			break;
258 
259 		udelay(1000);
260 	}
261 
262 	mmc_trace_state(mmc, &cmd);
263 	if (timeout <= 0) {
264 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 		pr_err("Timeout waiting card ready\n");
266 #endif
267 		return -ETIMEDOUT;
268 	}
269 
270 	return 0;
271 }
272 
/*
 * Set the card's block length with CMD16 (SET_BLOCKLEN).  Skipped
 * (returns 0) in DDR mode, where the block length is fixed.
 *
 * With CONFIG_MMC_QUIRKS, cards flagged MMC_QUIRK_RETRY_SET_BLOCKLEN
 * get a few extra attempts, since some cards fail the first CMD16.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
304 
305 #ifdef MMC_SUPPORTS_TUNING
/* Expected tuning block contents for a 4-bit bus (CMD19/CMD21 tuning) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Expected tuning block contents for an 8-bit bus (CMD21 tuning) */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
335 
/*
 * Execute one tuning command: read a single tuning block with 'opcode'
 * (CMD19/CMD21) and compare it against the expected pattern for the
 * current bus width.
 *
 * Returns 0 on a match, -EINVAL for bus widths other than 4/8 bits,
 * -EIO on a pattern mismatch, or the transfer error.
 * NOTE(review): cmd_error is never written by this implementation.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
373 #endif
374 
/*
 * Read 'blkcnt' blocks starting at 'start' into dst using CMD17/CMD18.
 * High-capacity cards are block-addressed; others use byte addresses.
 * A multi-block read is terminated with CMD12 (STOP_TRANSMISSION).
 *
 * Returns the number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
415 
/*
 * Block-read entry point for the block layer.  Validates the request
 * against the device size, selects the right hardware partition, sets
 * the block length and reads in chunks of at most cfg->b_max blocks.
 *
 * Returns the number of blocks read, or 0 on any error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* Make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* Split the transfer into chunks the host controller can handle */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
472 
473 static int mmc_go_idle(struct mmc *mmc)
474 {
475 	struct mmc_cmd cmd;
476 	int err;
477 
478 	udelay(1000);
479 
480 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
481 	cmd.cmdarg = 0;
482 	cmd.resp_type = MMC_RSP_NONE;
483 
484 	err = mmc_send_cmd(mmc, &cmd, NULL);
485 
486 	if (err)
487 		return err;
488 
489 	udelay(2000);
490 
491 	return 0;
492 }
493 
494 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD signal-voltage switch sequence.  A request for 3.3V
 * just programs the regulator; a request for 1.8V issues CMD11 and
 * follows the SD-spec sequence of gating the clock around the switch
 * and watching DAT0 for the card's acknowledgement.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
554 #endif
555 
/*
 * SD card power-up: loop on ACMD41 (APP_SEND_OP_COND) until the card
 * reports power-up complete (OCR_BUSY set), negotiating the voltage
 * window, high capacity (HCS) and optionally 1.8V signalling (S18R).
 * On success fills in mmc->version, ocr, high_capacity and rca.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		/* Only SD 2.0+ cards may be high capacity */
		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* OCR_BUSY set means power-up is complete */
		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* 0x41000000: CCS (bit 30) and S18A (bit 24) both set — the card
	 * accepted the request to switch to 1.8V signalling */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
635 
636 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
637 {
638 	struct mmc_cmd cmd;
639 	int err;
640 
641 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 	cmd.resp_type = MMC_RSP_R3;
643 	cmd.cmdarg = 0;
644 	if (use_arg && !mmc_host_is_spi(mmc))
645 		cmd.cmdarg = OCR_HCS |
646 			(mmc->cfg->voltages &
647 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
648 			(mmc->ocr & OCR_ACCESS_MODE);
649 
650 	err = mmc_send_cmd(mmc, &cmd, NULL);
651 	if (err)
652 		return err;
653 	mmc->ocr = cmd.response[0];
654 	return 0;
655 }
656 
/*
 * Start eMMC power-up: issue CMD1 first with no argument to query the
 * card, then with the negotiated argument.  Completion of the busy
 * polling is deferred to mmc_complete_op_cond().
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Ask the card for its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}
677 
/*
 * Finish the deferred eMMC power-up: poll CMD1 until the card reports
 * power-up complete (OCR_BUSY), with a 1000 ms timeout, then read the
 * OCR (via CMD58 in SPI mode) and latch high_capacity and the RCA.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			/* OCR_BUSY set means power-up is complete */
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* The exact version is determined later from the CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
723 
724 
725 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
726 {
727 	struct mmc_cmd cmd;
728 	struct mmc_data data;
729 	int err;
730 
731 	/* Get the Card Status Register */
732 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 	cmd.resp_type = MMC_RSP_R1;
734 	cmd.cmdarg = 0;
735 
736 	data.dest = (char *)ext_csd;
737 	data.blocks = 1;
738 	data.blocksize = MMC_MAX_BLOCK_LEN;
739 	data.flags = MMC_DATA_READ;
740 
741 	err = mmc_send_cmd(mmc, &cmd, &data);
742 
743 	return err;
744 }
745 
/*
 * Write one EXT_CSD byte with CMD6 (SWITCH) and wait for the card to
 * become ready again (1000 ms status poll).  The command itself is
 * retried up to 3 times on transport errors.
 * NOTE(review): the 'set' parameter is unused here; the write-byte
 * access mode is always used.
 */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	while (retries > 0) {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		/* Waiting for the ready status */
		if (!ret) {
			ret = mmc_send_status(mmc, timeout);
			return ret;
		}

		retries--;
	}

	return ret;

}
774 
/*
 * Program the card-side timing (EXT_CSD HS_TIMING) for the given bus
 * mode.  For HS/HS_52 the EXT_CSD is read back to verify the card
 * actually enabled high speed.  Returns -EINVAL for modes that have no
 * HS_TIMING value.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
817 
/*
 * Derive the eMMC card's capability mask (mmc->card_caps) from the
 * EXT_CSD CARD_TYPE field.  SPI hosts and pre-4.0 cards only get the
 * legacy 1-bit capability.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	/* Only the low 6 device-type bits are evaluated here */
	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
858 
859 static int mmc_set_capacity(struct mmc *mmc, int part_num)
860 {
861 	switch (part_num) {
862 	case 0:
863 		mmc->capacity = mmc->capacity_user;
864 		break;
865 	case 1:
866 	case 2:
867 		mmc->capacity = mmc->capacity_boot;
868 		break;
869 	case 3:
870 		mmc->capacity = mmc->capacity_rpmb;
871 		break;
872 	case 4:
873 	case 5:
874 	case 6:
875 	case 7:
876 		mmc->capacity = mmc->capacity_gp[part_num - 4];
877 		break;
878 	default:
879 		return -1;
880 	}
881 
882 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
883 
884 	return 0;
885 }
886 
887 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Before switching hardware partitions, make sure the current bus mode
 * is allowed there: HS200 is forbidden for non-user partitions.  If the
 * mode is forbidden (or no longer the best), re-run mode selection with
 * the forbidden capabilities masked out.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		pr_debug("selected mode (%s) is forbidden for part %d\n",
			 mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		pr_debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
911 #else
/* No mode restrictions to enforce when HS200 support is disabled */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
917 #endif
918 
/*
 * Select a hardware partition by updating the PARTITION_ACCESS bits of
 * EXT_CSD PART_CONF, after checking the bus mode is permitted for that
 * partition.  On success (or when falling back to the raw device on
 * -ENODEV) the cached capacity and block descriptor are updated.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
942 
943 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write-reliability) according to 'conf'.
 *
 * 'mode' selects how far to go: CHECK only validates the request
 * against the card's EXT_CSD, SET writes the size/attribute registers,
 * and COMPLETE additionally writes WR_REL_SET and sets
 * PARTITION_SETTING_COMPLETED.  Partitioning only becomes effective
 * after a power cycle, and is irreversible once completed.
 *
 * Returns 0 on success, -EINVAL for bad arguments/alignment,
 * -EMEDIUMTYPE when the card cannot support the request, -EPERM if the
 * card is already partitioned, or an mmc_switch/transfer error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1137 #endif
1138 
1139 #if !CONFIG_IS_ENABLED(DM_MMC)
1140 int mmc_getcd(struct mmc *mmc)
1141 {
1142 	int cd;
1143 
1144 	cd = board_mmc_getcd(mmc);
1145 
1146 	if (cd < 0) {
1147 		if (mmc->cfg->ops->getcd)
1148 			cd = mmc->cfg->ops->getcd(mmc);
1149 		else
1150 			cd = 1;
1151 	}
1152 
1153 	return cd;
1154 }
1155 #endif
1156 
1157 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1158 {
1159 	struct mmc_cmd cmd;
1160 	struct mmc_data data;
1161 
1162 	/* Switch the frequency */
1163 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1164 	cmd.resp_type = MMC_RSP_R1;
1165 	cmd.cmdarg = (mode << 31) | 0xffffff;
1166 	cmd.cmdarg &= ~(0xf << (group * 4));
1167 	cmd.cmdarg |= value << (group * 4);
1168 
1169 	data.dest = (char *)resp;
1170 	data.blocksize = 64;
1171 	data.blocks = 1;
1172 	data.flags = MMC_DATA_READ;
1173 
1174 	return mmc_send_cmd(mmc, &cmd, &data);
1175 }
1176 
1177 
/*
 * Probe an SD card's capabilities and accumulate them in mmc->card_caps:
 * read the SCR (ACMD51) for the spec version and 4-bit support, then use
 * CMD6 in check mode for high-speed and (when compiled in) UHS modes.
 * Also sets mmc->version from the SCR.  Returns 0 or a command error.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* Every card at least does 1-bit legacy transfers */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	/* SPI mode has no SCR/CMD6 negotiation */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	/* the 8-byte SCR read is retried up to 3 extra times on error */
	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	/* SCR arrives big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field (plus the SD_SPEC3 bit) gives the phys-layer version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* Record high-speed capability if the card advertises it */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* Group 1 support bits 16..20 of status word 3 encode the UHS modes */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1290 
/*
 * Switch the SD card's function group 1 (access mode) to the CMD6
 * function matching @mode.  Returns -EINVAL for an unknown mode,
 * -ENOTSUPP if the card did not accept the switch, or a command error.
 */
static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;

	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	int speed;

	/* Map the bus mode to the group-1 function number for CMD6 */
	switch (mode) {
	case SD_LEGACY:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case SD_HS:
		speed = HIGH_SPEED_BUS_SPEED;
		break;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	case UHS_SDR12:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case UHS_SDR25:
		speed = UHS_SDR25_BUS_SPEED;
		break;
	case UHS_SDR50:
		speed = UHS_SDR50_BUS_SPEED;
		break;
	case UHS_DDR50:
		speed = UHS_DDR50_BUS_SPEED;
		break;
	case UHS_SDR104:
		speed = UHS_SDR104_BUS_SPEED;
		break;
#endif
	default:
		return -EINVAL;
	}

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
	if (err)
		return err;

	/* The status block echoes the selected function; mismatch = refused */
	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
		return -ENOTSUPP;

	return 0;
}
1335 
1336 static int sd_select_bus_width(struct mmc *mmc, int w)
1337 {
1338 	int err;
1339 	struct mmc_cmd cmd;
1340 
1341 	if ((w != 4) && (w != 1))
1342 		return -EINVAL;
1343 
1344 	cmd.cmdidx = MMC_CMD_APP_CMD;
1345 	cmd.resp_type = MMC_RSP_R1;
1346 	cmd.cmdarg = mmc->rca << 16;
1347 
1348 	err = mmc_send_cmd(mmc, &cmd, NULL);
1349 	if (err)
1350 		return err;
1351 
1352 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1353 	cmd.resp_type = MMC_RSP_R1;
1354 	if (w == 4)
1355 		cmd.cmdarg = 2;
1356 	else if (w == 1)
1357 		cmd.cmdarg = 0;
1358 	err = mmc_send_cmd(mmc, &cmd, NULL);
1359 	if (err)
1360 		return err;
1361 
1362 	return 0;
1363 }
1364 
1365 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the 64-byte SD Status register (ACMD13) and derive the erase
 * parameters kept in mmc->ssr: allocation unit size, erase timeout and
 * erase offset.  Returns 0 on success or a command error; an invalid
 * AU code only logs and leaves mmc->ssr untouched.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> allocation unit size in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	/* the SSR read is retried up to 3 extra times on error */
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* CMD55 prefix: the next command is application-specific */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* SSR arrives big-endian; convert in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE code; codes above 9 are only valid on SD 3.0 cards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE (in AUs) and ERASE_TIMEOUT/ERASE_OFFSET */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* timeout per AU in ms; offset in ms */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1429 #endif
/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
/* indexed by TRAN_SPEED[2:0] from the CSD (see mmc_startup) */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 * Indexed by TRAN_SPEED[6:3] from the CSD.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1460 
1461 static inline int bus_width(uint cap)
1462 {
1463 	if (cap == MMC_MODE_8BIT)
1464 		return 8;
1465 	if (cap == MMC_MODE_4BIT)
1466 		return 4;
1467 	if (cap == MMC_MODE_1BIT)
1468 		return 1;
1469 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1470 	return 0;
1471 }
1472 
1473 #if !CONFIG_IS_ENABLED(DM_MMC)
1474 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM fallback: tuning is not implemented without driver model. */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1479 #endif
1480 
/* Non-DM stub: no init stream is sent before the first command. */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1484 
1485 static int mmc_set_ios(struct mmc *mmc)
1486 {
1487 	int ret = 0;
1488 
1489 	if (mmc->cfg->ops->set_ios)
1490 		ret = mmc->cfg->ops->set_ios(mmc);
1491 
1492 	return ret;
1493 }
1494 #endif
1495 
/*
 * Set (or gate, when @disable) the bus clock.  A requested rate is
 * clamped into the host's [f_min, f_max] window before being stored
 * and pushed to the driver.  Returns the mmc_set_ios() result.
 */
int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
{
	if (!disable) {
		if (clock > mmc->cfg->f_max)
			clock = mmc->cfg->f_max;

		if (clock < mmc->cfg->f_min)
			clock = mmc->cfg->f_min;
	}

	mmc->clock = clock;
	mmc->clk_disable = disable;

	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);

	return mmc_set_ios(mmc);
}
1513 
1514 static int mmc_set_bus_width(struct mmc *mmc, uint width)
1515 {
1516 	mmc->bus_width = width;
1517 
1518 	return mmc_set_ios(mmc);
1519 }
1520 
1521 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1522 /*
1523  * helper function to display the capabilities in a human
1524  * friendly manner. The capabilities include bus width and
1525  * supported modes.
1526  */
1527 void mmc_dump_capabilities(const char *text, uint caps)
1528 {
1529 	enum bus_mode mode;
1530 
1531 	pr_debug("%s: widths [", text);
1532 	if (caps & MMC_MODE_8BIT)
1533 		pr_debug("8, ");
1534 	if (caps & MMC_MODE_4BIT)
1535 		pr_debug("4, ");
1536 	if (caps & MMC_MODE_1BIT)
1537 		pr_debug("1, ");
1538 	pr_debug("\b\b] modes [");
1539 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1540 		if (MMC_CAP(mode) & caps)
1541 			pr_debug("%s, ", mmc_mode_name(mode));
1542 	pr_debug("\b\b]\n");
1543 }
1544 #endif
1545 
/*
 * One entry of a mode-preference table: a bus mode, the bus widths it
 * may run with, and (when tuning is compiled in) the tuning command.
 */
struct mode_width_tuning {
	enum bus_mode mode;
	/* bitmask of MMC_MODE_xBIT widths usable in this mode */
	uint widths;
#ifdef MMC_SUPPORTS_TUNING
	/* tuning command opcode; 0 when the mode needs no tuning */
	uint tuning;
#endif
};
1553 
1554 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1555 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1556 {
1557 	switch (voltage) {
1558 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1559 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1560 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1561 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1562 	}
1563 	return -EINVAL;
1564 }
1565 
1566 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1567 {
1568 	int err;
1569 
1570 	if (mmc->signal_voltage == signal_voltage)
1571 		return 0;
1572 
1573 	mmc->signal_voltage = signal_voltage;
1574 	err = mmc_set_ios(mmc);
1575 	if (err)
1576 		pr_debug("unable to set voltage (err %d)\n", err);
1577 
1578 	return err;
1579 }
1580 #else
/* I/O voltage support compiled out: accept every request as a no-op. */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1585 #endif
1586 
/*
 * SD bus modes in decreasing order of preference;
 * sd_select_mode_and_width() tries them from top to bottom.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1624 
/*
 * Iterate @mwt over sd_modes_by_pref, running the following statement
 * only for entries whose mode is present in @caps.
 */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1630 
/*
 * Try every (mode, width) combination supported by both the SD card and
 * the host, in decreasing preference order, until one fully configures:
 * bus width, card speed, host clock, optional tuning and an SSR read.
 * A failed candidate drops the bus back to SD_LEGACY before the next
 * try.  Returns 0 on success or -ENOTSUPP when nothing worked.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS requires the card to have accepted 1.8V signalling (S18R) */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR failure is only a warning, not fatal */
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1712 
/*
 * Read and compare the part of ext csd that is constant.
 * This can be used to check that the transfer is working
 * as expected.
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
{
	int err;
	const u8 *ext_csd = mmc->ext_csd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	/* Pre-4.0 cards have no EXT_CSD; nothing to verify */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	err = mmc_send_ext_csd(mmc, test_csd);
	if (err)
		return err;

	/* Only compare read only fields */
	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
	    ext_csd[EXT_CSD_REV]
		== test_csd[EXT_CSD_REV] &&
	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
		return 0;

	/* mismatch: the transfer corrupted data */
	return -EBADMSG;
}
1746 
1747 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Pick and apply a signalling voltage acceptable to both the card (for
 * the given bus @mode, derived from its EXT_CSD card type) and the
 * caller's @allowed_mask.  Candidates are tried lowest set bit first
 * (ffs) — assumed to encode the lowest voltage; confirm against the
 * MMC_SIGNAL_VOLTAGE_* bit layout.  Returns 0 once the host accepts a
 * voltage, -ENOTSUPP when no common voltage can be set.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	/* Build the set of voltages the card supports in this mode */
	switch (mode) {
	case MMC_HS_200:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	/* Try each common voltage until the host accepts one */
	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc,  best_match))
			return 0;

		/* host refused this voltage; drop it and try the next */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1784 #else
/* I/O voltage support compiled out: any voltage request succeeds. */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1790 #endif
1791 
/*
 * eMMC bus modes in decreasing order of preference;
 * mmc_select_mode_and_width() tries them from top to bottom.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1817 
/*
 * Iterate @mwt over mmc_modes_by_pref, running the following statement
 * only for entries whose mode is present in @caps.
 */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1823 
/*
 * Map between a host width capability and the EXT_CSD BUS_WIDTH byte,
 * listed DDR/widest first so iteration prefers faster configurations.
 */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability bit */
	bool is_ddr;		/* true for the DDR encodings */
	uint ext_csd_bits;	/* value for EXT_CSD_BUS_WIDTH */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1835 
/*
 * Iterate @ecbv over ext_csd_bus_width, running the following statement
 * only for entries matching the requested DDR-ness and present in @caps.
 */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1841 
/*
 * Walk the eMMC (mode, width) preference tables and configure the first
 * combination that survives a full EXT_CSD read-back check: voltage,
 * card bus width, card speed, optional DDR width, host clock and
 * optional tuning.  On candidate failure the previous voltage is
 * restored and the bus reverts to 1-bit legacy before the next try.
 * Returns 0 on success, -ENOTSUPP when nothing worked.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start negotiation from a safe legacy clock */
	mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so it can be restored on error */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card)
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
			if (mwt->tuning) {
				err = mmc_execute_tuning(mmc, mwt->tuning);
				if (err) {
					pr_debug("tuning failed\n");
					goto error;
				}
			}
#endif

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
1939 
1940 static int mmc_startup_v4(struct mmc *mmc)
1941 {
1942 	int err, i;
1943 	u64 capacity;
1944 	bool has_parts = false;
1945 	bool part_completed;
1946 	static const u32 mmc_versions[] = {
1947 		MMC_VERSION_4,
1948 		MMC_VERSION_4_1,
1949 		MMC_VERSION_4_2,
1950 		MMC_VERSION_4_3,
1951 		MMC_VERSION_4_4,
1952 		MMC_VERSION_4_41,
1953 		MMC_VERSION_4_5,
1954 		MMC_VERSION_5_0,
1955 		MMC_VERSION_5_1
1956 	};
1957 
1958 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1959 
1960 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1961 		return 0;
1962 
1963 	/* check  ext_csd version and capacity */
1964 	err = mmc_send_ext_csd(mmc, ext_csd);
1965 	if (err)
1966 		goto error;
1967 
1968 	/* store the ext csd for future reference */
1969 	if (!mmc->ext_csd)
1970 		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1971 	if (!mmc->ext_csd)
1972 		return -ENOMEM;
1973 	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
1974 
1975 	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
1976 		return -EINVAL;
1977 
1978 	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1979 
1980 	if (mmc->version >= MMC_VERSION_4_2) {
1981 		/*
1982 		 * According to the JEDEC Standard, the value of
1983 		 * ext_csd's capacity is valid if the value is more
1984 		 * than 2GB
1985 		 */
1986 		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1987 				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1988 				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1989 				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1990 		capacity *= MMC_MAX_BLOCK_LEN;
1991 		if ((capacity >> 20) > 2 * 1024)
1992 			mmc->capacity_user = capacity;
1993 	}
1994 
1995 	/* The partition data may be non-zero but it is only
1996 	 * effective if PARTITION_SETTING_COMPLETED is set in
1997 	 * EXT_CSD, so ignore any data if this bit is not set,
1998 	 * except for enabling the high-capacity group size
1999 	 * definition (see below).
2000 	 */
2001 	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2002 			    EXT_CSD_PARTITION_SETTING_COMPLETED);
2003 
2004 	/* store the partition info of emmc */
2005 	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2006 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2007 	    ext_csd[EXT_CSD_BOOT_MULT])
2008 		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2009 	if (part_completed &&
2010 	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2011 		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2012 
2013 	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2014 
2015 	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2016 
2017 	for (i = 0; i < 4; i++) {
2018 		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2019 		uint mult = (ext_csd[idx + 2] << 16) +
2020 			(ext_csd[idx + 1] << 8) + ext_csd[idx];
2021 		if (mult)
2022 			has_parts = true;
2023 		if (!part_completed)
2024 			continue;
2025 		mmc->capacity_gp[i] = mult;
2026 		mmc->capacity_gp[i] *=
2027 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2028 		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2029 		mmc->capacity_gp[i] <<= 19;
2030 	}
2031 
2032 #ifndef CONFIG_SPL_BUILD
2033 	if (part_completed) {
2034 		mmc->enh_user_size =
2035 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2036 			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2037 			ext_csd[EXT_CSD_ENH_SIZE_MULT];
2038 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2039 		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2040 		mmc->enh_user_size <<= 19;
2041 		mmc->enh_user_start =
2042 			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2043 			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2044 			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2045 			ext_csd[EXT_CSD_ENH_START_ADDR];
2046 		if (mmc->high_capacity)
2047 			mmc->enh_user_start <<= 9;
2048 	}
2049 #endif
2050 
2051 	/*
2052 	 * Host needs to enable ERASE_GRP_DEF bit if device is
2053 	 * partitioned. This bit will be lost every time after a reset
2054 	 * or power off. This will affect erase size.
2055 	 */
2056 	if (part_completed)
2057 		has_parts = true;
2058 	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2059 	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2060 		has_parts = true;
2061 	if (has_parts) {
2062 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2063 				 EXT_CSD_ERASE_GROUP_DEF, 1);
2064 
2065 		if (err)
2066 			goto error;
2067 
2068 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2069 	}
2070 
2071 	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2072 #if CONFIG_IS_ENABLED(MMC_WRITE)
2073 		/* Read out group size from ext_csd */
2074 		mmc->erase_grp_size =
2075 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2076 #endif
2077 		/*
2078 		 * if high capacity and partition setting completed
2079 		 * SEC_COUNT is valid even if it is smaller than 2 GiB
2080 		 * JEDEC Standard JESD84-B45, 6.2.4
2081 		 */
2082 		if (mmc->high_capacity && part_completed) {
2083 			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2084 				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2085 				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2086 				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2087 			capacity *= MMC_MAX_BLOCK_LEN;
2088 			mmc->capacity_user = capacity;
2089 		}
2090 	}
2091 #if CONFIG_IS_ENABLED(MMC_WRITE)
2092 	else {
2093 		/* Calculate the group size from the csd value. */
2094 		int erase_gsz, erase_gmul;
2095 
2096 		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2097 		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2098 		mmc->erase_grp_size = (erase_gsz + 1)
2099 			* (erase_gmul + 1);
2100 	}
2101 #endif
2102 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2103 	mmc->hc_wp_grp_size = 1024
2104 		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2105 		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2106 #endif
2107 
2108 	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2109 
2110 	return 0;
2111 error:
2112 	if (mmc->ext_csd) {
2113 		free(mmc->ext_csd);
2114 		mmc->ext_csd = NULL;
2115 	}
2116 	return err;
2117 }
2118 
2119 static int mmc_startup(struct mmc *mmc)
2120 {
2121 	int err, i;
2122 	uint mult, freq;
2123 	u64 cmult, csize;
2124 	struct mmc_cmd cmd;
2125 	struct blk_desc *bdesc;
2126 
2127 #ifdef CONFIG_MMC_SPI_CRC_ON
2128 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2129 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2130 		cmd.resp_type = MMC_RSP_R1;
2131 		cmd.cmdarg = 1;
2132 		err = mmc_send_cmd(mmc, &cmd, NULL);
2133 		if (err)
2134 			return err;
2135 	}
2136 #endif
2137 
2138 	/* Put the Card in Identify Mode */
2139 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2140 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2141 	cmd.resp_type = MMC_RSP_R2;
2142 	cmd.cmdarg = 0;
2143 
2144 	err = mmc_send_cmd(mmc, &cmd, NULL);
2145 
2146 #ifdef CONFIG_MMC_QUIRKS
2147 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2148 		int retries = 4;
2149 		/*
2150 		 * It has been seen that SEND_CID may fail on the first
2151 		 * attempt, let's try a few more time
2152 		 */
2153 		do {
2154 			err = mmc_send_cmd(mmc, &cmd, NULL);
2155 			if (!err)
2156 				break;
2157 		} while (retries--);
2158 	}
2159 #endif
2160 
2161 	if (err)
2162 		return err;
2163 
2164 	memcpy(mmc->cid, cmd.response, 16);
2165 
2166 	/*
2167 	 * For MMC cards, set the Relative Address.
2168 	 * For SD cards, get the Relatvie Address.
2169 	 * This also puts the cards into Standby State
2170 	 */
2171 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2172 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2173 		cmd.cmdarg = mmc->rca << 16;
2174 		cmd.resp_type = MMC_RSP_R6;
2175 
2176 		err = mmc_send_cmd(mmc, &cmd, NULL);
2177 
2178 		if (err)
2179 			return err;
2180 
2181 		if (IS_SD(mmc))
2182 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2183 	}
2184 
2185 	/* Get the Card-Specific Data */
2186 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2187 	cmd.resp_type = MMC_RSP_R2;
2188 	cmd.cmdarg = mmc->rca << 16;
2189 
2190 	err = mmc_send_cmd(mmc, &cmd, NULL);
2191 
2192 	if (err)
2193 		return err;
2194 
2195 	mmc->csd[0] = cmd.response[0];
2196 	mmc->csd[1] = cmd.response[1];
2197 	mmc->csd[2] = cmd.response[2];
2198 	mmc->csd[3] = cmd.response[3];
2199 
2200 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2201 		int version = (cmd.response[0] >> 26) & 0xf;
2202 
2203 		switch (version) {
2204 		case 0:
2205 			mmc->version = MMC_VERSION_1_2;
2206 			break;
2207 		case 1:
2208 			mmc->version = MMC_VERSION_1_4;
2209 			break;
2210 		case 2:
2211 			mmc->version = MMC_VERSION_2_2;
2212 			break;
2213 		case 3:
2214 			mmc->version = MMC_VERSION_3;
2215 			break;
2216 		case 4:
2217 			mmc->version = MMC_VERSION_4;
2218 			break;
2219 		default:
2220 			mmc->version = MMC_VERSION_1_2;
2221 			break;
2222 		}
2223 	}
2224 
2225 	/* divide frequency by 10, since the mults are 10x bigger */
2226 	freq = fbase[(cmd.response[0] & 0x7)];
2227 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2228 
2229 	mmc->legacy_speed = freq * mult;
2230 	mmc_select_mode(mmc, MMC_LEGACY);
2231 
2232 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2233 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2234 #if CONFIG_IS_ENABLED(MMC_WRITE)
2235 
2236 	if (IS_SD(mmc))
2237 		mmc->write_bl_len = mmc->read_bl_len;
2238 	else
2239 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2240 #endif
2241 
2242 	if (mmc->high_capacity) {
2243 		csize = (mmc->csd[1] & 0x3f) << 16
2244 			| (mmc->csd[2] & 0xffff0000) >> 16;
2245 		cmult = 8;
2246 	} else {
2247 		csize = (mmc->csd[1] & 0x3ff) << 2
2248 			| (mmc->csd[2] & 0xc0000000) >> 30;
2249 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2250 	}
2251 
2252 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2253 	mmc->capacity_user *= mmc->read_bl_len;
2254 	mmc->capacity_boot = 0;
2255 	mmc->capacity_rpmb = 0;
2256 	for (i = 0; i < 4; i++)
2257 		mmc->capacity_gp[i] = 0;
2258 
2259 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2260 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2261 
2262 #if CONFIG_IS_ENABLED(MMC_WRITE)
2263 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2264 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2265 #endif
2266 
2267 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2268 		cmd.cmdidx = MMC_CMD_SET_DSR;
2269 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2270 		cmd.resp_type = MMC_RSP_NONE;
2271 		if (mmc_send_cmd(mmc, &cmd, NULL))
2272 			pr_warn("MMC: SET_DSR failed\n");
2273 	}
2274 
2275 	/* Select the card, and put it into Transfer Mode */
2276 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2277 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2278 		cmd.resp_type = MMC_RSP_R1;
2279 		cmd.cmdarg = mmc->rca << 16;
2280 		err = mmc_send_cmd(mmc, &cmd, NULL);
2281 
2282 		if (err)
2283 			return err;
2284 	}
2285 
2286 	/*
2287 	 * For SD, its erase group is always one sector
2288 	 */
2289 #if CONFIG_IS_ENABLED(MMC_WRITE)
2290 	mmc->erase_grp_size = 1;
2291 #endif
2292 	mmc->part_config = MMCPART_NOAVAILABLE;
2293 
2294 	err = mmc_startup_v4(mmc);
2295 	if (err)
2296 		return err;
2297 
2298 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2299 	if (err)
2300 		return err;
2301 
2302 	if (IS_SD(mmc)) {
2303 		err = sd_get_capabilities(mmc);
2304 		if (err)
2305 			return err;
2306 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2307 	} else {
2308 		err = mmc_get_capabilities(mmc);
2309 		if (err)
2310 			return err;
2311 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2312 	}
2313 
2314 	if (err)
2315 		return err;
2316 
2317 	mmc->best_mode = mmc->selected_mode;
2318 
2319 	/* Fix the block length for DDR mode */
2320 	if (mmc->ddr_mode) {
2321 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2322 #if CONFIG_IS_ENABLED(MMC_WRITE)
2323 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2324 #endif
2325 	}
2326 
2327 	/* fill in device description */
2328 	bdesc = mmc_get_blk_desc(mmc);
2329 	bdesc->lun = 0;
2330 	bdesc->hwpart = 0;
2331 	bdesc->type = 0;
2332 	bdesc->blksz = mmc->read_bl_len;
2333 	bdesc->log2blksz = LOG2(bdesc->blksz);
2334 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2335 #if !defined(CONFIG_SPL_BUILD) || \
2336 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2337 		!defined(CONFIG_USE_TINY_PRINTF))
2338 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2339 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2340 		(mmc->cid[3] >> 16) & 0xffff);
2341 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2342 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2343 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2344 		(mmc->cid[2] >> 24) & 0xff);
2345 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2346 		(mmc->cid[2] >> 16) & 0xf);
2347 #else
2348 	bdesc->vendor[0] = 0;
2349 	bdesc->product[0] = 0;
2350 	bdesc->revision[0] = 0;
2351 #endif
2352 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2353 	part_init(bdesc);
2354 #endif
2355 
2356 	return 0;
2357 }
2358 
2359 static int mmc_send_if_cond(struct mmc *mmc)
2360 {
2361 	struct mmc_cmd cmd;
2362 	int err;
2363 
2364 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2365 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2366 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2367 	cmd.resp_type = MMC_RSP_R7;
2368 
2369 	err = mmc_send_cmd(mmc, &cmd, NULL);
2370 
2371 	if (err)
2372 		return err;
2373 
2374 	if ((cmd.response[0] & 0xff) != 0xaa)
2375 		return -EOPNOTSUPP;
2376 	else
2377 		mmc->version = SD_VERSION_2;
2378 
2379 	return 0;
2380 }
2381 
#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * board-specific MMC power initializations.
 *
 * Weak no-op default; non-driver-model boards may override it to switch
 * on card power (called from mmc_power_init() below).
 */
__weak void board_mmc_power_init(void)
{
}
#endif
2388 
/*
 * Resolve the card's power supplies before powering it on.
 *
 * With driver model, look up the optional "vmmc-supply" and "vqmmc-supply"
 * regulators for this device and cache them in the mmc struct; a missing
 * regulator is merely logged, not an error. Without driver model, fall
 * back to the board-provided hook. Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2414 
2415 /*
2416  * put the host in the initial state:
2417  * - turn on Vdd (card power supply)
2418  * - configure the bus width and clock to minimal values
2419  */
2420 static void mmc_set_initial_state(struct mmc *mmc)
2421 {
2422 	int err;
2423 
2424 	/* First try to set 3.3V. If it fails set to 1.8V */
2425 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2426 	if (err != 0)
2427 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2428 	if (err != 0)
2429 		pr_warn("mmc: failed to set signal voltage\n");
2430 
2431 	mmc_select_mode(mmc, MMC_LEGACY);
2432 	mmc_set_bus_width(mmc, 1);
2433 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2434 }
2435 
2436 static int mmc_power_on(struct mmc *mmc)
2437 {
2438 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2439 	if (mmc->vmmc_supply) {
2440 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2441 
2442 		if (ret) {
2443 			puts("Error enabling VMMC supply\n");
2444 			return ret;
2445 		}
2446 	}
2447 #endif
2448 	return 0;
2449 }
2450 
2451 static int mmc_power_off(struct mmc *mmc)
2452 {
2453 	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2454 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2455 	if (mmc->vmmc_supply) {
2456 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2457 
2458 		if (ret) {
2459 			pr_debug("Error disabling VMMC supply\n");
2460 			return ret;
2461 		}
2462 	}
2463 #endif
2464 	return 0;
2465 }
2466 
/* Power the card off and back on, with the delay the SD spec asks for. */
static int mmc_power_cycle(struct mmc *mmc)
{
	int ret = mmc_power_off(mmc);

	if (ret)
		return ret;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2481 
/*
 * Begin card initialization: power up the card, reset it and start the
 * SD/MMC operating-condition negotiation. On success init_in_progress is
 * set and the remaining steps are finished later by mmc_complete_init().
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

#if !defined(CONFIG_MMC_BROKEN_CD)
	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#else
	/* card-detect is unreliable on this board: assume a card is present */
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	/* Nothing to do if this device has already been initialized */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

	/* Re-entered (without UHS) if the UHS negotiation below fails */
retry:
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. NOTE(review): the result is deliberately
	 * discarded (err is overwritten just below) — a CMD8 failure only
	 * means the card is not SD 2.0, which the following commands sort
	 * out on their own.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS attempt failed: power cycle and retry in non-UHS mode */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2593 
2594 static int mmc_complete_init(struct mmc *mmc)
2595 {
2596 	int err = 0;
2597 
2598 	mmc->init_in_progress = 0;
2599 	if (mmc->op_cond_pending)
2600 		err = mmc_complete_op_cond(mmc);
2601 
2602 	if (!err)
2603 		err = mmc_startup(mmc);
2604 	if (err)
2605 		mmc->has_init = 0;
2606 	else
2607 		mmc->has_init = 1;
2608 	return err;
2609 }
2610 
2611 int mmc_init(struct mmc *mmc)
2612 {
2613 	int err = 0;
2614 	__maybe_unused unsigned start;
2615 #if CONFIG_IS_ENABLED(DM_MMC)
2616 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2617 
2618 	upriv->mmc = mmc;
2619 #endif
2620 	if (mmc->has_init)
2621 		return 0;
2622 
2623 	start = get_timer(0);
2624 
2625 	if (!mmc->init_in_progress)
2626 		err = mmc_start_init(mmc);
2627 
2628 	if (!err)
2629 		err = mmc_complete_init(mmc);
2630 	if (err)
2631 		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2632 
2633 	return err;
2634 }
2635 
/*
 * Store the driver stage register (DSR) value; it is applied to the card
 * later, during startup (see the SET_DSR handling in mmc_startup()).
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2641 
/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	/* Weak default: no CPU-level controllers; SoC code may override */
	return -1;
}
2647 
/* board-specific MMC initializations. */
__weak int board_mmc_init(bd_t *bis)
{
	/* Weak default: no board controllers; board code may override */
	return -1;
}
2653 
/*
 * Mark (or unmark) this device for early initialization: mmc_do_preinit()
 * calls mmc_start_init() for devices with this flag set.
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2658 
#if CONFIG_IS_ENABLED(DM_MMC)
/*
 * Probe every MMC controller known to driver model. Individual probe
 * failures are reported but do not stop the scan, so one broken
 * controller cannot hide the others.
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/*
 * Non-driver-model fallback: let the board hook register controllers,
 * using the CPU hook only if the board one is absent or fails.
 */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
2697 
2698 int mmc_initialize(bd_t *bis)
2699 {
2700 	static int initialized = 0;
2701 	int ret;
2702 	if (initialized)	/* Avoid initializing mmc multiple times */
2703 		return 0;
2704 	initialized = 1;
2705 
2706 #if !CONFIG_IS_ENABLED(BLK)
2707 #if !CONFIG_IS_ENABLED(MMC_TINY)
2708 	mmc_list_init();
2709 #endif
2710 #endif
2711 	ret = mmc_probe(bis);
2712 	if (ret)
2713 		return ret;
2714 
2715 #ifndef CONFIG_SPL_BUILD
2716 	print_mmc_devices(',');
2717 #endif
2718 
2719 	mmc_do_preinit();
2720 	return 0;
2721 }
2722 
#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable manual background operations on an eMMC device, after checking
 * in EXT_CSD that the card supports them and has not enabled them yet.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int ret;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	ret = mmc_send_ext_csd(mmc, ext_csd);
	if (ret) {
		puts("Could not get ext_csd register values\n");
		return ret;
	}

	/* The card must advertise BKOPS support */
	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	/* Nothing to do if the enable bit is already set */
	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (ret) {
		puts("Failed to enable manual background operations\n");
		return ret;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif
2756