xref: /openbmc/u-boot/drivers/mmc/mmc.c (revision 4c0411eb)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, Freescale Semiconductor, Inc
4  * Andy Fleming
5  *
6  * Based vaguely on the Linux code
7  */
8 
9 #include <config.h>
10 #include <common.h>
11 #include <command.h>
12 #include <dm.h>
13 #include <dm/device-internal.h>
14 #include <errno.h>
15 #include <mmc.h>
16 #include <part.h>
17 #include <power/regulator.h>
18 #include <malloc.h>
19 #include <memalign.h>
20 #include <linux/list.h>
21 #include <div64.h>
22 #include "mmc_private.h"
23 
24 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
25 static int mmc_power_cycle(struct mmc *mmc);
26 #if !CONFIG_IS_ENABLED(MMC_TINY)
27 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
28 #endif
29 
30 #if !CONFIG_IS_ENABLED(DM_MMC)
31 
32 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Non-DM stub: polling the DAT0 busy line needs a driver callback that
 * the legacy (non-DM) ops table does not provide, so report -ENOSYS and
 * let callers fall back to fixed delays.
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
37 #endif
38 
/*
 * Board-level write-protect hook; boards may override this weak default.
 * Returning -1 makes mmc_getwp() fall through to the host driver's op.
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
43 
44 int mmc_getwp(struct mmc *mmc)
45 {
46 	int wp;
47 
48 	wp = board_mmc_getwp(mmc);
49 
50 	if (wp < 0) {
51 		if (mmc->cfg->ops->getwp)
52 			wp = mmc->cfg->ops->getwp(mmc);
53 		else
54 			wp = 0;
55 	}
56 
57 	return wp;
58 }
59 
/*
 * Board-level card-detect hook; boards may override this weak default.
 * Returning -1 makes mmc_getcd() fall through to the host driver's op.
 */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
64 #endif
65 
66 #ifdef CONFIG_MMC_TRACE
/* Log the command index and argument just before it is sent */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}
72 
/*
 * Log the outcome of a command: the error code when the transfer
 * failed, otherwise the response register(s) decoded according to the
 * response type that was requested.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			/* R2 (CID/CSD) spans all four response words */
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08X \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				/* dump each response word byte-wise, MSB first */
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
124 
125 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
126 {
127 	int status;
128 
129 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
130 	printf("CURR STATE:%d\n", status);
131 }
132 #endif
133 
134 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
135 const char *mmc_mode_name(enum bus_mode mode)
136 {
137 	static const char *const names[] = {
138 	      [MMC_LEGACY]	= "MMC legacy",
139 	      [SD_LEGACY]	= "SD Legacy",
140 	      [MMC_HS]		= "MMC High Speed (26MHz)",
141 	      [SD_HS]		= "SD High Speed (50MHz)",
142 	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
143 	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
144 	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
145 	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
146 	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
147 	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
148 	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
149 	      [MMC_HS_200]	= "HS200 (200MHz)",
150 	      [MMC_HS_400]	= "HS400 (200MHz)",
151 	};
152 
153 	if (mode >= MMC_MODES_END)
154 		return "Unknown mode";
155 	else
156 		return names[mode];
157 }
158 #endif
159 
160 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
161 {
162 	static const int freqs[] = {
163 	      [MMC_LEGACY]	= 25000000,
164 	      [SD_LEGACY]	= 25000000,
165 	      [MMC_HS]		= 26000000,
166 	      [SD_HS]		= 50000000,
167 	      [MMC_HS_52]	= 52000000,
168 	      [MMC_DDR_52]	= 52000000,
169 	      [UHS_SDR12]	= 25000000,
170 	      [UHS_SDR25]	= 50000000,
171 	      [UHS_SDR50]	= 100000000,
172 	      [UHS_DDR50]	= 50000000,
173 	      [UHS_SDR104]	= 208000000,
174 	      [MMC_HS_200]	= 200000000,
175 	      [MMC_HS_400]	= 200000000,
176 	};
177 
178 	if (mode == MMC_LEGACY)
179 		return mmc->legacy_speed;
180 	else if (mode >= MMC_MODES_END)
181 		return 0;
182 	else
183 		return freqs[mode];
184 }
185 
186 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
187 {
188 	mmc->selected_mode = mode;
189 	mmc->tran_speed = mmc_mode2freq(mmc, mode);
190 	mmc->ddr_mode = mmc_is_mode_ddr(mode);
191 	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
192 		 mmc->tran_speed / 1000000);
193 	return 0;
194 }
195 
196 #if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Send a command (with optional data phase) through the legacy host
 * driver ops, wrapped in trace output when CONFIG_MMC_TRACE is set.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
207 #endif
208 
/*
 * Poll the card with CMD13 until it reports ready-for-data and has
 * left the programming state, or until @timeout (in ms, 1 ms per poll)
 * expires.  Transient command failures are retried up to 5 times; any
 * error bit in the card status aborts with -ECOMM.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	/* NOTE(review): cmdarg is left unset on SPI hosts - presumably
	 * ignored by SPI drivers; confirm. */
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;

			if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				pr_err("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
253 
/*
 * Set the card's block length with CMD16.  Skipped in DDR mode, where
 * the block length is fixed and CMD16 is illegal.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}
285 
286 #ifdef MMC_SUPPORTS_TUNING
/* Standard 64-byte tuning block reference pattern for a 4-bit bus */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
297 
/* Standard 128-byte tuning block reference pattern for an 8-bit bus */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
316 
317 int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
318 {
319 	struct mmc_cmd cmd;
320 	struct mmc_data data;
321 	const u8 *tuning_block_pattern;
322 	int size, err;
323 
324 	if (mmc->bus_width == 8) {
325 		tuning_block_pattern = tuning_blk_pattern_8bit;
326 		size = sizeof(tuning_blk_pattern_8bit);
327 	} else if (mmc->bus_width == 4) {
328 		tuning_block_pattern = tuning_blk_pattern_4bit;
329 		size = sizeof(tuning_blk_pattern_4bit);
330 	} else {
331 		return -EINVAL;
332 	}
333 
334 	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
335 
336 	cmd.cmdidx = opcode;
337 	cmd.cmdarg = 0;
338 	cmd.resp_type = MMC_RSP_R1;
339 
340 	data.dest = (void *)data_buf;
341 	data.blocks = 1;
342 	data.blocksize = size;
343 	data.flags = MMC_DATA_READ;
344 
345 	err = mmc_send_cmd(mmc, &cmd, &data);
346 	if (err)
347 		return err;
348 
349 	if (memcmp(data_buf, tuning_block_pattern, size))
350 		return -EIO;
351 
352 	return 0;
353 }
354 #endif
355 
356 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
357 			   lbaint_t blkcnt)
358 {
359 	struct mmc_cmd cmd;
360 	struct mmc_data data;
361 
362 	if (blkcnt > 1)
363 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
364 	else
365 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
366 
367 	if (mmc->high_capacity)
368 		cmd.cmdarg = start;
369 	else
370 		cmd.cmdarg = start * mmc->read_bl_len;
371 
372 	cmd.resp_type = MMC_RSP_R1;
373 
374 	data.dest = dst;
375 	data.blocks = blkcnt;
376 	data.blocksize = mmc->read_bl_len;
377 	data.flags = MMC_DATA_READ;
378 
379 	if (mmc_send_cmd(mmc, &cmd, &data))
380 		return 0;
381 
382 	if (blkcnt > 1) {
383 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
384 		cmd.cmdarg = 0;
385 		cmd.resp_type = MMC_RSP_R1b;
386 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
387 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
388 			pr_err("mmc fail to send stop cmd\n");
389 #endif
390 			return 0;
391 		}
392 	}
393 
394 	return blkcnt;
395 }
396 
/*
 * Block-device read entry point: read @blkcnt blocks starting at
 * @start into @dst, chunked to the host's maximum burst size (b_max).
 * Returns the number of blocks read, or 0 on any error (unknown
 * device, hwpart switch failure, range overflow, or transfer failure).
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* transfer in chunks no larger than the host's burst limit */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
453 
454 static int mmc_go_idle(struct mmc *mmc)
455 {
456 	struct mmc_cmd cmd;
457 	int err;
458 
459 	udelay(1000);
460 
461 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
462 	cmd.cmdarg = 0;
463 	cmd.resp_type = MMC_RSP_NONE;
464 
465 	err = mmc_send_cmd(mmc, &cmd, NULL);
466 
467 	if (err)
468 		return err;
469 
470 	udelay(2000);
471 
472 	return 0;
473 }
474 
475 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Switch the SD card's signalling level (UHS).  A request for 3.3V
 * needs no CMD11 and only changes the host's signal voltage; for 1.8V
 * the CMD11 sequence is performed with the clock gating and DAT0
 * handshaking the SD spec requires.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
535 #endif
536 
/*
 * Negotiate the SD card's operating conditions with ACMD41, polling
 * until the card leaves busy (up to ~1 s).  Requests high-capacity
 * (HCS) for v2 cards and the 1.8V switch (S18R) when @uhs_en.  On
 * success records the OCR, SD version and high-capacity flag, and
 * performs the UHS voltage switch if the card accepted S18R.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* card is ready (bit 30) and accepted the 1.8V request (bit 24) */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
616 
617 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
618 {
619 	struct mmc_cmd cmd;
620 	int err;
621 
622 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
623 	cmd.resp_type = MMC_RSP_R3;
624 	cmd.cmdarg = 0;
625 	if (use_arg && !mmc_host_is_spi(mmc))
626 		cmd.cmdarg = OCR_HCS |
627 			(mmc->cfg->voltages &
628 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
629 			(mmc->ocr & OCR_ACCESS_MODE);
630 
631 	err = mmc_send_cmd(mmc, &cmd, NULL);
632 	if (err)
633 		return err;
634 	mmc->ocr = cmd.response[0];
635 	return 0;
636 }
637 
638 static int mmc_send_op_cond(struct mmc *mmc)
639 {
640 	int err, i;
641 
642 	/* Some cards seem to need this */
643 	mmc_go_idle(mmc);
644 
645  	/* Asking to the card its capabilities */
646 	for (i = 0; i < 2; i++) {
647 		err = mmc_send_op_cond_iter(mmc, i != 0);
648 		if (err)
649 			return err;
650 
651 		/* exit if not busy (flag seems to be inverted) */
652 		if (mmc->ocr & OCR_BUSY)
653 			break;
654 	}
655 	mmc->op_cond_pending = 1;
656 	return 0;
657 }
658 
/*
 * Finish the CMD1 handshake started by mmc_send_op_cond(): keep
 * polling until the card leaves busy (1 s timeout), read the final
 * OCR via CMD58 on SPI hosts, and record the capacity mode.  Resets
 * the version so it is re-read from the CSD later.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
704 
705 
706 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
707 {
708 	struct mmc_cmd cmd;
709 	struct mmc_data data;
710 	int err;
711 
712 	/* Get the Card Status Register */
713 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
714 	cmd.resp_type = MMC_RSP_R1;
715 	cmd.cmdarg = 0;
716 
717 	data.dest = (char *)ext_csd;
718 	data.blocks = 1;
719 	data.blocksize = MMC_MAX_BLOCK_LEN;
720 	data.flags = MMC_DATA_READ;
721 
722 	err = mmc_send_cmd(mmc, &cmd, &data);
723 
724 	return err;
725 }
726 
727 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
728 {
729 	struct mmc_cmd cmd;
730 	int timeout = 1000;
731 	int retries = 3;
732 	int ret;
733 
734 	cmd.cmdidx = MMC_CMD_SWITCH;
735 	cmd.resp_type = MMC_RSP_R1b;
736 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
737 				 (index << 16) |
738 				 (value << 8);
739 
740 	while (retries > 0) {
741 		ret = mmc_send_cmd(mmc, &cmd, NULL);
742 
743 		/* Waiting for the ready status */
744 		if (!ret) {
745 			ret = mmc_send_status(mmc, timeout);
746 			return ret;
747 		}
748 
749 		retries--;
750 	}
751 
752 	return ret;
753 
754 }
755 
756 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the eMMC HS_TIMING field in EXT_CSD for the requested bus
 * mode.  For the plain high-speed modes the EXT_CSD is read back to
 * verify the card actually switched.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
804 
805 static int mmc_get_capabilities(struct mmc *mmc)
806 {
807 	u8 *ext_csd = mmc->ext_csd;
808 	char cardtype;
809 
810 	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
811 
812 	if (mmc_host_is_spi(mmc))
813 		return 0;
814 
815 	/* Only version 4 supports high-speed */
816 	if (mmc->version < MMC_VERSION_4)
817 		return 0;
818 
819 	if (!ext_csd) {
820 		pr_err("No ext_csd found!\n"); /* this should enver happen */
821 		return -ENOTSUPP;
822 	}
823 
824 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
825 
826 	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
827 	mmc->cardtype = cardtype;
828 
829 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
830 	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
831 			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
832 		mmc->card_caps |= MMC_MODE_HS200;
833 	}
834 #endif
835 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
836 	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
837 			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
838 		mmc->card_caps |= MMC_MODE_HS400;
839 	}
840 #endif
841 	if (cardtype & EXT_CSD_CARD_TYPE_52) {
842 		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
843 			mmc->card_caps |= MMC_MODE_DDR_52MHz;
844 		mmc->card_caps |= MMC_MODE_HS_52MHz;
845 	}
846 	if (cardtype & EXT_CSD_CARD_TYPE_26)
847 		mmc->card_caps |= MMC_MODE_HS;
848 
849 	return 0;
850 }
851 #endif
852 
853 static int mmc_set_capacity(struct mmc *mmc, int part_num)
854 {
855 	switch (part_num) {
856 	case 0:
857 		mmc->capacity = mmc->capacity_user;
858 		break;
859 	case 1:
860 	case 2:
861 		mmc->capacity = mmc->capacity_boot;
862 		break;
863 	case 3:
864 		mmc->capacity = mmc->capacity_rpmb;
865 		break;
866 	case 4:
867 	case 5:
868 	case 6:
869 	case 7:
870 		mmc->capacity = mmc->capacity_gp[part_num - 4];
871 		break;
872 	default:
873 		return -1;
874 	}
875 
876 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
877 
878 	return 0;
879 }
880 
881 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * HS200 may not be used while a boot/RPMB partition is selected.  If
 * the current bus mode is forbidden for @part_num - or is no longer
 * the best available mode - re-run mode/width selection with the
 * forbidden capability masked out.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		pr_debug("selected mode (%s) is forbidden for part %d\n",
			 mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		pr_debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
905 #else
/* Without HS200 support any bus mode may access any partition */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
911 #endif
912 
/*
 * Select hardware partition @part_num by rewriting the access bits of
 * EXT_CSD PARTITION_CONFIG, then refresh the cached capacity and the
 * block descriptor's hwpart field.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
936 
937 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user data area,
 * general-purpose partitions and write reliability) according to
 * @conf.
 *
 * @mode selects how far to go: MMC_HWPART_CONF_CHECK only validates
 * the request against the card's capabilities, MMC_HWPART_CONF_SET
 * also writes the configuration, and MMC_HWPART_CONF_COMPLETE
 * additionally sets PARTITION_SETTING_COMPLETED - which is
 * irreversible and only takes effect after the next power cycle.
 *
 * Returns 0 on success, -EINVAL for bad arguments or misaligned
 * sizes, -EMEDIUMTYPE when the card lacks a required capability,
 * -EPERM if the card is already partitioned, or an mmc_switch()
 * error.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte addressing: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 3-byte little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1131 #endif
1132 
1133 #if !CONFIG_IS_ENABLED(DM_MMC)
1134 int mmc_getcd(struct mmc *mmc)
1135 {
1136 	int cd;
1137 
1138 	cd = board_mmc_getcd(mmc);
1139 
1140 	if (cd < 0) {
1141 		if (mmc->cfg->ops->getcd)
1142 			cd = mmc->cfg->ops->getcd(mmc);
1143 		else
1144 			cd = 1;
1145 	}
1146 
1147 	return cd;
1148 }
1149 #endif
1150 
1151 #if !CONFIG_IS_ENABLED(MMC_TINY)
1152 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1153 {
1154 	struct mmc_cmd cmd;
1155 	struct mmc_data data;
1156 
1157 	/* Switch the frequency */
1158 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1159 	cmd.resp_type = MMC_RSP_R1;
1160 	cmd.cmdarg = (mode << 31) | 0xffffff;
1161 	cmd.cmdarg &= ~(0xf << (group * 4));
1162 	cmd.cmdarg |= value << (group * 4);
1163 
1164 	data.dest = (char *)resp;
1165 	data.blocksize = 64;
1166 	data.blocks = 1;
1167 	data.flags = MMC_DATA_READ;
1168 
1169 	return mmc_send_cmd(mmc, &cmd, &data);
1170 }
1171 
/*
 * Query an SD card's capabilities: the SCR (spec version, 4-bit
 * support) and, via CMD6 in check mode, the supported speed modes.
 * Results are accumulated into mmc->card_caps; mmc->version is set
 * from the SCR.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* Baseline: every card can do 1-bit legacy access */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		/* SCR reads can fail transiently: retry a few times */
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* Spec-version nibble of the SCR selects mmc->version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* a further SCR bit promotes 2.0 to 3.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed is supported, record the capability */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	/* Bus-mode support bits of the switch status advertise UHS modes */
	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1284 
1285 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1286 {
1287 	int err;
1288 
1289 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1290 	int speed;
1291 
1292 	/* SD version 1.00 and 1.01 does not support CMD 6 */
1293 	if (mmc->version == SD_VERSION_1_0)
1294 		return 0;
1295 
1296 	switch (mode) {
1297 	case SD_LEGACY:
1298 		speed = UHS_SDR12_BUS_SPEED;
1299 		break;
1300 	case SD_HS:
1301 		speed = HIGH_SPEED_BUS_SPEED;
1302 		break;
1303 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1304 	case UHS_SDR12:
1305 		speed = UHS_SDR12_BUS_SPEED;
1306 		break;
1307 	case UHS_SDR25:
1308 		speed = UHS_SDR25_BUS_SPEED;
1309 		break;
1310 	case UHS_SDR50:
1311 		speed = UHS_SDR50_BUS_SPEED;
1312 		break;
1313 	case UHS_DDR50:
1314 		speed = UHS_DDR50_BUS_SPEED;
1315 		break;
1316 	case UHS_SDR104:
1317 		speed = UHS_SDR104_BUS_SPEED;
1318 		break;
1319 #endif
1320 	default:
1321 		return -EINVAL;
1322 	}
1323 
1324 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1325 	if (err)
1326 		return err;
1327 
1328 	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1329 		return -ENOTSUPP;
1330 
1331 	return 0;
1332 }
1333 
1334 static int sd_select_bus_width(struct mmc *mmc, int w)
1335 {
1336 	int err;
1337 	struct mmc_cmd cmd;
1338 
1339 	if ((w != 4) && (w != 1))
1340 		return -EINVAL;
1341 
1342 	cmd.cmdidx = MMC_CMD_APP_CMD;
1343 	cmd.resp_type = MMC_RSP_R1;
1344 	cmd.cmdarg = mmc->rca << 16;
1345 
1346 	err = mmc_send_cmd(mmc, &cmd, NULL);
1347 	if (err)
1348 		return err;
1349 
1350 	cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1351 	cmd.resp_type = MMC_RSP_R1;
1352 	if (w == 4)
1353 		cmd.cmdarg = 2;
1354 	else if (w == 1)
1355 		cmd.cmdarg = 0;
1356 	err = mmc_send_cmd(mmc, &cmd, NULL);
1357 	if (err)
1358 		return err;
1359 
1360 	return 0;
1361 }
1362 #endif
1363 
1364 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * Read the SD Status register (ACMD13) and extract the allocation
 * unit size and erase timing parameters into mmc->ssr.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> allocation unit size in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0,		SZ_16K / 512,		SZ_32K / 512,
		SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
		SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
		SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
		SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	/* ACMD prefix: the next command is application-specific */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		/* transient read failures are retried a few times */
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	/* Convert the big-endian register image in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU codes above 9 are only accepted from SD 3.0 cards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* erase timeout is per-AU: scale by the size code */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1428 #endif
/*
 * Frequency bases for the CSD TRAN_SPEED field,
 * divided by 10 to be nice to platforms without floating point
 * (they pair with the 10x multipliers table below).
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1459 
1460 static inline int bus_width(uint cap)
1461 {
1462 	if (cap == MMC_MODE_8BIT)
1463 		return 8;
1464 	if (cap == MMC_MODE_4BIT)
1465 		return 4;
1466 	if (cap == MMC_MODE_1BIT)
1467 		return 1;
1468 	pr_warn("invalid bus witdh capability 0x%x\n", cap);
1469 	return 0;
1470 }
1471 
1472 #if !CONFIG_IS_ENABLED(DM_MMC)
1473 #ifdef MMC_SUPPORTS_TUNING
/* Non-DM fallback: tuning is not implemented without DM_MMC */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1478 #endif
1479 
/* Non-DM fallback: intentionally a no-op */
static void mmc_send_init_stream(struct mmc *mmc)
{
}
1483 
1484 static int mmc_set_ios(struct mmc *mmc)
1485 {
1486 	int ret = 0;
1487 
1488 	if (mmc->cfg->ops->set_ios)
1489 		ret = mmc->cfg->ops->set_ios(mmc);
1490 
1491 	return ret;
1492 }
1493 #endif
1494 
1495 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1496 {
1497 	if (!disable) {
1498 		if (clock > mmc->cfg->f_max)
1499 			clock = mmc->cfg->f_max;
1500 
1501 		if (clock < mmc->cfg->f_min)
1502 			clock = mmc->cfg->f_min;
1503 	}
1504 
1505 	mmc->clock = clock;
1506 	mmc->clk_disable = disable;
1507 
1508 	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1509 
1510 	return mmc_set_ios(mmc);
1511 }
1512 
/* Cache the new bus width and apply it via the host driver */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1519 
1520 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1521 /*
1522  * helper function to display the capabilities in a human
1523  * friendly manner. The capabilities include bus width and
1524  * supported modes.
1525  */
1526 void mmc_dump_capabilities(const char *text, uint caps)
1527 {
1528 	enum bus_mode mode;
1529 
1530 	pr_debug("%s: widths [", text);
1531 	if (caps & MMC_MODE_8BIT)
1532 		pr_debug("8, ");
1533 	if (caps & MMC_MODE_4BIT)
1534 		pr_debug("4, ");
1535 	if (caps & MMC_MODE_1BIT)
1536 		pr_debug("1, ");
1537 	pr_debug("\b\b] modes [");
1538 	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1539 		if (MMC_CAP(mode) & caps)
1540 			pr_debug("%s, ", mmc_mode_name(mode));
1541 	pr_debug("\b\b]\n");
1542 }
1543 #endif
1544 
/* One entry of a mode-preference table (see *_modes_by_pref below) */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus speed mode to attempt */
	uint widths;		/* OR-mask of MMC_MODE_xBIT widths to try */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning command opcode; 0 = no tuning */
#endif
};
1552 
1553 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1554 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1555 {
1556 	switch (voltage) {
1557 	case MMC_SIGNAL_VOLTAGE_000: return 0;
1558 	case MMC_SIGNAL_VOLTAGE_330: return 3300;
1559 	case MMC_SIGNAL_VOLTAGE_180: return 1800;
1560 	case MMC_SIGNAL_VOLTAGE_120: return 1200;
1561 	}
1562 	return -EINVAL;
1563 }
1564 
1565 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1566 {
1567 	int err;
1568 
1569 	if (mmc->signal_voltage == signal_voltage)
1570 		return 0;
1571 
1572 	mmc->signal_voltage = signal_voltage;
1573 	err = mmc_set_ios(mmc);
1574 	if (err)
1575 		pr_debug("unable to set voltage (err %d)\n", err);
1576 
1577 	return err;
1578 }
1579 #else
/* IO-voltage switching disabled: accept any request as a no-op */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1584 #endif
1585 
1586 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes in order of preference; sd_select_mode_and_width()
 * tries entries first to last, each with the listed widths.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1624 
/*
 * Iterate @mwt over sd_modes_by_pref, executing the following
 * statement only for entries whose mode bit is set in @caps.
 */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1630 
/*
 * Try every (mode, width) combination supported by both card and host,
 * best mode first, until one configures successfully.  Each failed
 * attempt drops the bus back to SD_LEGACY speed before the next try.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS modes require the S18R (1.8V signalling) OCR bit */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR is informational: a failure only warns */
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1712 
1713 /*
1714  * read the compare the part of ext csd that is constant.
1715  * This can be used to check that the transfer is working
1716  * as expected.
1717  */
1718 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1719 {
1720 	int err;
1721 	const u8 *ext_csd = mmc->ext_csd;
1722 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1723 
1724 	if (mmc->version < MMC_VERSION_4)
1725 		return 0;
1726 
1727 	err = mmc_send_ext_csd(mmc, test_csd);
1728 	if (err)
1729 		return err;
1730 
1731 	/* Only compare read only fields */
1732 	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1733 		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1734 	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1735 		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1736 	    ext_csd[EXT_CSD_REV]
1737 		== test_csd[EXT_CSD_REV] &&
1738 	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1739 		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1740 	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1741 		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1742 		return 0;
1743 
1744 	return -EBADMSG;
1745 }
1746 
1747 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * Pick a signal voltage acceptable to both the card (derived from its
 * EXT_CSD card-type bits for @mode) and @allowed_mask.  Candidates are
 * tried lowest-order bit first; each voltage that fails to apply is
 * struck from the mask before retrying.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_400:
	case MMC_HS_200:
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
		    EXT_CSD_CARD_TYPE_HS400_1_8V))
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
		    EXT_CSD_CARD_TYPE_HS400_1_2V))
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		/* all other modes use 3.3V signalling */
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		/* lowest set bit of the card/host intersection */
		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc,  best_match))
			return 0;

		/* that voltage failed; remove it and try the next */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1787 #else
/* IO-voltage support disabled: keep the current voltage and succeed */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1793 #endif
1794 
/*
 * eMMC bus modes in order of preference; mmc_select_mode_and_width()
 * tries entries first to last, each with the listed widths.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	{
		.mode = MMC_HS_400,
		.widths = MMC_MODE_8BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1827 
/*
 * Iterate @mwt over mmc_modes_by_pref, executing the following
 * statement only for entries whose mode bit is set in @caps.
 */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1833 
/* Map of host width capability + DDR flag to EXT_CSD BUS_WIDTH values */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability bit */
	bool is_ddr;		/* true for the DDR variant of the width */
	uint ext_csd_bits;	/* value written to the BUS_WIDTH byte */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1845 
1846 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1847 static int mmc_select_hs400(struct mmc *mmc)
1848 {
1849 	int err;
1850 
1851 	/* Set timing to HS200 for tuning */
1852 	err = mmc_set_card_speed(mmc, MMC_HS_200);
1853 	if (err)
1854 		return err;
1855 
1856 	/* configure the bus mode (host) */
1857 	mmc_select_mode(mmc, MMC_HS_200);
1858 	mmc_set_clock(mmc, mmc->tran_speed, false);
1859 
1860 	/* execute tuning if needed */
1861 	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1862 	if (err) {
1863 		debug("tuning failed\n");
1864 		return err;
1865 	}
1866 
1867 	/* Set back to HS */
1868 	mmc_set_card_speed(mmc, MMC_HS);
1869 	mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
1870 
1871 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1872 			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1873 	if (err)
1874 		return err;
1875 
1876 	err = mmc_set_card_speed(mmc, MMC_HS_400);
1877 	if (err)
1878 		return err;
1879 
1880 	mmc_select_mode(mmc, MMC_HS_400);
1881 	err = mmc_set_clock(mmc, mmc->tran_speed, false);
1882 	if (err)
1883 		return err;
1884 
1885 	return 0;
1886 }
1887 #else
/* HS400 support not compiled in */
static int mmc_select_hs400(struct mmc *mmc)
{
	return -ENOTSUPP;
}
1892 #endif
1893 
/*
 * Iterate @ecbv over ext_csd_bus_width, executing the following
 * statement only for entries matching the requested DDR flag and a
 * width bit set in @caps.
 */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1899 
/*
 * Try every (mode, width) combination supported by both card and host,
 * best mode first, until one passes the EXT_CSD read-back check.  On
 * failure the previous voltage is restored and the bus reverts to
 * 1-bit legacy mode before the next attempt.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	/* start from a safe, known-good clock */
	mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			if (mwt->mode == MMC_HS_400) {
				/* HS400 needs its own multi-step sequence */
				err = mmc_select_hs400(mmc);
				if (err) {
					printf("Select HS400 failed %d\n", err);
					goto error;
				}
			} else {
				/* configure the bus speed (card) */
				err = mmc_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/*
				 * configure the bus width AND the ddr mode
				 * (card). The host side will be taken care
				 * of in the next step
				 */
				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
					err = mmc_switch(mmc,
							 EXT_CSD_CMD_SET_NORMAL,
							 EXT_CSD_BUS_WIDTH,
							 ecbw->ext_csd_bits);
					if (err)
						goto error;
				}

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

				/* execute tuning if needed */
				if (mwt->tuning) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
2009 #endif
2010 
2011 #if CONFIG_IS_ENABLED(MMC_TINY)
/* Static EXT_CSD backing store used instead of malloc() in MMC_TINY */
DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2013 #endif
2014 
/*
 * MMC v4+ startup: read the EXT_CSD and derive from it the device
 * version, capacities, hardware partition layout and erase-group
 * geometry.  On failure the cached ext_csd is released.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	/* indexed by the EXT_CSD_REV byte */
	static const u32 mmc_versions[] = {
		MMC_VERSION_4,
		MMC_VERSION_4_1,
		MMC_VERSION_4_2,
		MMC_VERSION_4_3,
		MMC_VERSION_4_4,
		MMC_VERSION_4_41,
		MMC_VERSION_4_5,
		MMC_VERSION_5_0,
		MMC_VERSION_5_1
	};

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: use the static backup buffer, no malloc */
	u8 *ext_csd = ext_csd_bkup;

	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd)
		memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = ext_csd;
#else
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check  ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
#endif
	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
		return -EINVAL;

	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];

	if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		/*
		 * NOTE(review): ext_csd[] is u8, so each term promotes
		 * to int and the << 24 term can sign-extend into the
		 * u64 when that byte is >= 0x80 — confirm whether an
		 * explicit cast is needed for very large devices.
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* boot and RPMB sizes are given in 128KiB units (<< 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* general purpose partitions: 3-byte multiplier each */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

#ifndef CONFIG_SPL_BUILD
	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity devices address in 512-byte sectors */
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}
#endif

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		/* keep the local copy in sync with the card */
		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	}
#if CONFIG_IS_ENABLED(MMC_WRITE)
	else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
#endif

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	/* drop the cached (possibly partial) ext_csd on failure */
	if (mmc->ext_csd) {
#if !CONFIG_IS_ENABLED(MMC_TINY)
		free(mmc->ext_csd);
#endif
		mmc->ext_csd = NULL;
	}
	return err;
}
2212 
2213 static int mmc_startup(struct mmc *mmc)
2214 {
2215 	int err, i;
2216 	uint mult, freq;
2217 	u64 cmult, csize;
2218 	struct mmc_cmd cmd;
2219 	struct blk_desc *bdesc;
2220 
2221 #ifdef CONFIG_MMC_SPI_CRC_ON
2222 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2223 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2224 		cmd.resp_type = MMC_RSP_R1;
2225 		cmd.cmdarg = 1;
2226 		err = mmc_send_cmd(mmc, &cmd, NULL);
2227 		if (err)
2228 			return err;
2229 	}
2230 #endif
2231 
2232 	/* Put the Card in Identify Mode */
2233 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2234 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
2235 	cmd.resp_type = MMC_RSP_R2;
2236 	cmd.cmdarg = 0;
2237 
2238 	err = mmc_send_cmd(mmc, &cmd, NULL);
2239 
2240 #ifdef CONFIG_MMC_QUIRKS
2241 	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2242 		int retries = 4;
2243 		/*
2244 		 * It has been seen that SEND_CID may fail on the first
2245 		 * attempt, let's try a few more time
2246 		 */
2247 		do {
2248 			err = mmc_send_cmd(mmc, &cmd, NULL);
2249 			if (!err)
2250 				break;
2251 		} while (retries--);
2252 	}
2253 #endif
2254 
2255 	if (err)
2256 		return err;
2257 
2258 	memcpy(mmc->cid, cmd.response, 16);
2259 
2260 	/*
2261 	 * For MMC cards, set the Relative Address.
2262 	 * For SD cards, get the Relatvie Address.
2263 	 * This also puts the cards into Standby State
2264 	 */
2265 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2266 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2267 		cmd.cmdarg = mmc->rca << 16;
2268 		cmd.resp_type = MMC_RSP_R6;
2269 
2270 		err = mmc_send_cmd(mmc, &cmd, NULL);
2271 
2272 		if (err)
2273 			return err;
2274 
2275 		if (IS_SD(mmc))
2276 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2277 	}
2278 
2279 	/* Get the Card-Specific Data */
2280 	cmd.cmdidx = MMC_CMD_SEND_CSD;
2281 	cmd.resp_type = MMC_RSP_R2;
2282 	cmd.cmdarg = mmc->rca << 16;
2283 
2284 	err = mmc_send_cmd(mmc, &cmd, NULL);
2285 
2286 	if (err)
2287 		return err;
2288 
2289 	mmc->csd[0] = cmd.response[0];
2290 	mmc->csd[1] = cmd.response[1];
2291 	mmc->csd[2] = cmd.response[2];
2292 	mmc->csd[3] = cmd.response[3];
2293 
2294 	if (mmc->version == MMC_VERSION_UNKNOWN) {
2295 		int version = (cmd.response[0] >> 26) & 0xf;
2296 
2297 		switch (version) {
2298 		case 0:
2299 			mmc->version = MMC_VERSION_1_2;
2300 			break;
2301 		case 1:
2302 			mmc->version = MMC_VERSION_1_4;
2303 			break;
2304 		case 2:
2305 			mmc->version = MMC_VERSION_2_2;
2306 			break;
2307 		case 3:
2308 			mmc->version = MMC_VERSION_3;
2309 			break;
2310 		case 4:
2311 			mmc->version = MMC_VERSION_4;
2312 			break;
2313 		default:
2314 			mmc->version = MMC_VERSION_1_2;
2315 			break;
2316 		}
2317 	}
2318 
2319 	/* divide frequency by 10, since the mults are 10x bigger */
2320 	freq = fbase[(cmd.response[0] & 0x7)];
2321 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2322 
2323 	mmc->legacy_speed = freq * mult;
2324 	mmc_select_mode(mmc, MMC_LEGACY);
2325 
2326 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2327 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2328 #if CONFIG_IS_ENABLED(MMC_WRITE)
2329 
2330 	if (IS_SD(mmc))
2331 		mmc->write_bl_len = mmc->read_bl_len;
2332 	else
2333 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2334 #endif
2335 
2336 	if (mmc->high_capacity) {
2337 		csize = (mmc->csd[1] & 0x3f) << 16
2338 			| (mmc->csd[2] & 0xffff0000) >> 16;
2339 		cmult = 8;
2340 	} else {
2341 		csize = (mmc->csd[1] & 0x3ff) << 2
2342 			| (mmc->csd[2] & 0xc0000000) >> 30;
2343 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
2344 	}
2345 
2346 	mmc->capacity_user = (csize + 1) << (cmult + 2);
2347 	mmc->capacity_user *= mmc->read_bl_len;
2348 	mmc->capacity_boot = 0;
2349 	mmc->capacity_rpmb = 0;
2350 	for (i = 0; i < 4; i++)
2351 		mmc->capacity_gp[i] = 0;
2352 
2353 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2354 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2355 
2356 #if CONFIG_IS_ENABLED(MMC_WRITE)
2357 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2358 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2359 #endif
2360 
2361 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2362 		cmd.cmdidx = MMC_CMD_SET_DSR;
2363 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2364 		cmd.resp_type = MMC_RSP_NONE;
2365 		if (mmc_send_cmd(mmc, &cmd, NULL))
2366 			pr_warn("MMC: SET_DSR failed\n");
2367 	}
2368 
2369 	/* Select the card, and put it into Transfer Mode */
2370 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2371 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
2372 		cmd.resp_type = MMC_RSP_R1;
2373 		cmd.cmdarg = mmc->rca << 16;
2374 		err = mmc_send_cmd(mmc, &cmd, NULL);
2375 
2376 		if (err)
2377 			return err;
2378 	}
2379 
2380 	/*
2381 	 * For SD, its erase group is always one sector
2382 	 */
2383 #if CONFIG_IS_ENABLED(MMC_WRITE)
2384 	mmc->erase_grp_size = 1;
2385 #endif
2386 	mmc->part_config = MMCPART_NOAVAILABLE;
2387 
2388 	err = mmc_startup_v4(mmc);
2389 	if (err)
2390 		return err;
2391 
2392 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2393 	if (err)
2394 		return err;
2395 
2396 #if CONFIG_IS_ENABLED(MMC_TINY)
2397 	mmc_set_clock(mmc, mmc->legacy_speed, false);
2398 	mmc_select_mode(mmc, IS_SD(mmc) ? SD_LEGACY : MMC_LEGACY);
2399 	mmc_set_bus_width(mmc, 1);
2400 #else
2401 	if (IS_SD(mmc)) {
2402 		err = sd_get_capabilities(mmc);
2403 		if (err)
2404 			return err;
2405 		err = sd_select_mode_and_width(mmc, mmc->card_caps);
2406 	} else {
2407 		err = mmc_get_capabilities(mmc);
2408 		if (err)
2409 			return err;
2410 		mmc_select_mode_and_width(mmc, mmc->card_caps);
2411 	}
2412 #endif
2413 	if (err)
2414 		return err;
2415 
2416 	mmc->best_mode = mmc->selected_mode;
2417 
2418 	/* Fix the block length for DDR mode */
2419 	if (mmc->ddr_mode) {
2420 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2421 #if CONFIG_IS_ENABLED(MMC_WRITE)
2422 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2423 #endif
2424 	}
2425 
2426 	/* fill in device description */
2427 	bdesc = mmc_get_blk_desc(mmc);
2428 	bdesc->lun = 0;
2429 	bdesc->hwpart = 0;
2430 	bdesc->type = 0;
2431 	bdesc->blksz = mmc->read_bl_len;
2432 	bdesc->log2blksz = LOG2(bdesc->blksz);
2433 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2434 #if !defined(CONFIG_SPL_BUILD) || \
2435 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2436 		!defined(CONFIG_USE_TINY_PRINTF))
2437 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2438 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2439 		(mmc->cid[3] >> 16) & 0xffff);
2440 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2441 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2442 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2443 		(mmc->cid[2] >> 24) & 0xff);
2444 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2445 		(mmc->cid[2] >> 16) & 0xf);
2446 #else
2447 	bdesc->vendor[0] = 0;
2448 	bdesc->product[0] = 0;
2449 	bdesc->revision[0] = 0;
2450 #endif
2451 
2452 	return 0;
2453 }
2454 
2455 static int mmc_send_if_cond(struct mmc *mmc)
2456 {
2457 	struct mmc_cmd cmd;
2458 	int err;
2459 
2460 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
2461 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2462 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2463 	cmd.resp_type = MMC_RSP_R7;
2464 
2465 	err = mmc_send_cmd(mmc, &cmd, NULL);
2466 
2467 	if (err)
2468 		return err;
2469 
2470 	if ((cmd.response[0] & 0xff) != 0xaa)
2471 		return -EOPNOTSUPP;
2472 	else
2473 		mmc->version = SD_VERSION_2;
2474 
2475 	return 0;
2476 }
2477 
2478 #if !CONFIG_IS_ENABLED(DM_MMC)
2479 /* board-specific MMC power initializations. */
/* Default no-op; boards may override to switch on their MMC power rails */
__weak void board_mmc_power_init(void)
{
}
2483 #endif
2484 
/*
 * mmc_power_init() - resolve the card's power supplies
 *
 * With driver model, look up the optional vmmc (card power) and vqmmc
 * (I/O voltage) regulators from the device tree; a missing supply is
 * only logged, not treated as an error. Without driver model, fall
 * back to the board_mmc_power_init() hook.
 *
 * Return: 0 (always; missing regulators are reported at debug level)
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	/* Both supplies are optional in the device tree */
	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2510 
2511 /*
2512  * put the host in the initial state:
2513  * - turn on Vdd (card power supply)
2514  * - configure the bus width and clock to minimal values
2515  */
2516 static void mmc_set_initial_state(struct mmc *mmc)
2517 {
2518 	int err;
2519 
2520 	/* First try to set 3.3V. If it fails set to 1.8V */
2521 	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2522 	if (err != 0)
2523 		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2524 	if (err != 0)
2525 		pr_warn("mmc: failed to set signal voltage\n");
2526 
2527 	mmc_select_mode(mmc, MMC_LEGACY);
2528 	mmc_set_bus_width(mmc, 1);
2529 	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2530 }
2531 
2532 static int mmc_power_on(struct mmc *mmc)
2533 {
2534 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2535 	if (mmc->vmmc_supply) {
2536 		int ret = regulator_set_enable(mmc->vmmc_supply, true);
2537 
2538 		if (ret) {
2539 			puts("Error enabling VMMC supply\n");
2540 			return ret;
2541 		}
2542 	}
2543 #endif
2544 	return 0;
2545 }
2546 
2547 static int mmc_power_off(struct mmc *mmc)
2548 {
2549 	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2550 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2551 	if (mmc->vmmc_supply) {
2552 		int ret = regulator_set_enable(mmc->vmmc_supply, false);
2553 
2554 		if (ret) {
2555 			pr_debug("Error disabling VMMC supply\n");
2556 			return ret;
2557 		}
2558 	}
2559 #endif
2560 	return 0;
2561 }
2562 
static int mmc_power_cycle(struct mmc *mmc)
{
	int err = mmc_power_off(mmc);

	if (err)
		return err;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2577 
/*
 * mmc_get_op_cond() - power up the bus and query card operating conditions
 *
 * Initializes power supplies, power-cycles the card (disabling UHS modes
 * if a full cycle is impossible), resets the card with CMD0 and then
 * identifies it: SD first (CMD8 + ACMD41), falling back to MMC (CMD1)
 * when the SD query times out. A failed UHS attempt triggers one full
 * retry with UHS disabled.
 *
 * Return: 0 on success, -EOPNOTSUPP if the card answers neither SD nor
 * MMC identification, or another negative errno on failure
 */
int mmc_get_op_cond(struct mmc *mmc)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	/* Nothing to do if the card is already initialized */
	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	/* Enable retry quirks for flaky cards when quirk support is built in */
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	/* Restart here with uhs_en cleared if the UHS attempt failed */
	mmc_set_initial_state(mmc);
	mmc_send_init_stream(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);
	/*
	 * NOTE(review): the CMD8 result above is deliberately not checked;
	 * ACMD41 below is sent either way, and pre-2.0 SD cards are simply
	 * expected not to answer CMD8.
	 */

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed: power cycle and retry without it */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
2661 
/*
 * mmc_start_init() - begin card initialization
 *
 * Sets the baseline host capabilities, checks card presence, then runs
 * mmc_get_op_cond(). On success, init_in_progress is set so that
 * mmc_complete_init() can finish the job later.
 *
 * Return: 0 on success, -ENOMEDIUM if no card is present, or a negative
 * errno from mmc_get_op_cond()
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
			 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;

#if !defined(CONFIG_MMC_BROKEN_CD)
	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#else
	/* Broken card-detect: assume a card is always inserted */
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* Without driver model, a missing init hook also means "no card" */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2698 
2699 static int mmc_complete_init(struct mmc *mmc)
2700 {
2701 	int err = 0;
2702 
2703 	mmc->init_in_progress = 0;
2704 	if (mmc->op_cond_pending)
2705 		err = mmc_complete_op_cond(mmc);
2706 
2707 	if (!err)
2708 		err = mmc_startup(mmc);
2709 	if (err)
2710 		mmc->has_init = 0;
2711 	else
2712 		mmc->has_init = 1;
2713 	return err;
2714 }
2715 
/*
 * mmc_init() - fully initialize an MMC/SD card
 *
 * Runs the start phase (unless already in progress via preinit) and the
 * completion phase back to back. A no-op if the card was initialized
 * before. On failure, logs the error code and elapsed time.
 *
 * Return: 0 on success, negative errno otherwise
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	/* Publish this struct mmc through the uclass private data */
	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2740 
/*
 * mmc_set_dsr() - store a driver stage register value
 *
 * The value is only recorded here; it is sent to the card (CMD4) during
 * startup when the CSD indicates DSR support.
 */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}
2746 
2747 /* CPU-specific MMC initializations */
/* Default stub: report failure so callers know no CPU MMC was set up */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}
2752 
2753 /* board-specific MMC initializations. */
/* Default stub: report failure so mmc_probe() falls back to cpu_mmc_init() */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}
2758 
/* Mark this device for early initialization by mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
2763 
2764 #if CONFIG_IS_ENABLED(DM_MMC)
/*
 * mmc_probe() - bind and probe all MMC devices (driver model variant)
 *
 * Walks the MMC uclass, requesting devices by sequence number until
 * -ENODEV, then probes every bound device. Individual probe failures
 * are logged but do not abort the scan.
 *
 * Return: 0, or a negative errno if the uclass itself is unavailable
 */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	/* Probe each device; one failure must not block the others */
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
2793 #else
2794 static int mmc_probe(bd_t *bis)
2795 {
2796 	if (board_mmc_init(bis) < 0)
2797 		cpu_mmc_init(bis);
2798 
2799 	return 0;
2800 }
2801 #endif
2802 
2803 int mmc_initialize(bd_t *bis)
2804 {
2805 	static int initialized = 0;
2806 	int ret;
2807 	if (initialized)	/* Avoid initializing mmc multiple times */
2808 		return 0;
2809 	initialized = 1;
2810 
2811 #if !CONFIG_IS_ENABLED(BLK)
2812 #if !CONFIG_IS_ENABLED(MMC_TINY)
2813 	mmc_list_init();
2814 #endif
2815 #endif
2816 	ret = mmc_probe(bis);
2817 	if (ret)
2818 		return ret;
2819 
2820 #ifndef CONFIG_SPL_BUILD
2821 	print_mmc_devices(',');
2822 #endif
2823 
2824 	mmc_do_preinit();
2825 	return 0;
2826 }
2827 
2828 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * mmc_set_bkops_enable() - enable manual background operations on an eMMC
 *
 * Reads EXT_CSD to verify the device advertises BKOPS support, then sets
 * the BKOPS_EN bit via a SWITCH command.
 * NOTE(review): per JEDEC, BKOPS_EN is a one-time programmable field —
 * confirm before enabling on production parts.
 *
 * Return: 0 on success (or if already enabled), -EMEDIUMTYPE if the
 * device lacks BKOPS support, or a negative errno on command failure
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
2860 #endif
2861