xref: /openbmc/u-boot/drivers/mmc/mmc.c (revision 72c10153)
1 /*
2  * Copyright 2008, Freescale Semiconductor, Inc
3  * Andy Fleming
4  *
5  * Based vaguely on the Linux code
6  *
7  * SPDX-License-Identifier:	GPL-2.0+
8  */
9 
10 #include <config.h>
11 #include <common.h>
12 #include <command.h>
13 #include <dm.h>
14 #include <dm/device-internal.h>
15 #include <errno.h>
16 #include <mmc.h>
17 #include <part.h>
18 #include <power/regulator.h>
19 #include <malloc.h>
20 #include <memalign.h>
21 #include <linux/list.h>
22 #include <div64.h>
23 #include "mmc_private.h"
24 
25 static const unsigned int sd_au_size[] = {
26 	0,		SZ_16K / 512,		SZ_32K / 512,
27 	SZ_64K / 512,	SZ_128K / 512,		SZ_256K / 512,
28 	SZ_512K / 512,	SZ_1M / 512,		SZ_2M / 512,
29 	SZ_4M / 512,	SZ_8M / 512,		(SZ_8M + SZ_4M) / 512,
30 	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512,
31 };
32 
33 #ifndef CONFIG_DM_MMC_OPS
34 __weak int board_mmc_getwp(struct mmc *mmc)
35 {
36 	return -1;
37 }
38 
39 int mmc_getwp(struct mmc *mmc)
40 {
41 	int wp;
42 
43 	wp = board_mmc_getwp(mmc);
44 
45 	if (wp < 0) {
46 		if (mmc->cfg->ops->getwp)
47 			wp = mmc->cfg->ops->getwp(mmc);
48 		else
49 			wp = 0;
50 	}
51 
52 	return wp;
53 }
54 
55 __weak int board_mmc_getcd(struct mmc *mmc)
56 {
57 	return -1;
58 }
59 #endif
60 
61 #ifdef CONFIG_MMC_TRACE
62 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
63 {
64 	printf("CMD_SEND:%d\n", cmd->cmdidx);
65 	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
66 }
67 
68 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
69 {
70 	int i;
71 	u8 *ptr;
72 
73 	if (ret) {
74 		printf("\t\tRET\t\t\t %d\n", ret);
75 	} else {
76 		switch (cmd->resp_type) {
77 		case MMC_RSP_NONE:
78 			printf("\t\tMMC_RSP_NONE\n");
79 			break;
80 		case MMC_RSP_R1:
81 			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
82 				cmd->response[0]);
83 			break;
84 		case MMC_RSP_R1b:
85 			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
86 				cmd->response[0]);
87 			break;
88 		case MMC_RSP_R2:
89 			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
90 				cmd->response[0]);
91 			printf("\t\t          \t\t 0x%08X \n",
92 				cmd->response[1]);
93 			printf("\t\t          \t\t 0x%08X \n",
94 				cmd->response[2]);
95 			printf("\t\t          \t\t 0x%08X \n",
96 				cmd->response[3]);
97 			printf("\n");
98 			printf("\t\t\t\t\tDUMPING DATA\n");
99 			for (i = 0; i < 4; i++) {
100 				int j;
101 				printf("\t\t\t\t\t%03d - ", i*4);
102 				ptr = (u8 *)&cmd->response[i];
103 				ptr += 3;
104 				for (j = 0; j < 4; j++)
105 					printf("%02X ", *ptr--);
106 				printf("\n");
107 			}
108 			break;
109 		case MMC_RSP_R3:
110 			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
111 				cmd->response[0]);
112 			break;
113 		default:
114 			printf("\t\tERROR MMC rsp not supported\n");
115 			break;
116 		}
117 	}
118 }
119 
120 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
121 {
122 	int status;
123 
124 	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
125 	printf("CURR STATE:%d\n", status);
126 }
127 #endif
128 
129 #ifndef CONFIG_DM_MMC_OPS
130 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
131 {
132 	int ret;
133 
134 	mmmc_trace_before_send(mmc, cmd);
135 	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
136 	mmmc_trace_after_send(mmc, cmd, ret);
137 
138 	return ret;
139 }
140 #endif
141 
142 int mmc_send_status(struct mmc *mmc, int timeout)
143 {
144 	struct mmc_cmd cmd;
145 	int err, retries = 5;
146 
147 	cmd.cmdidx = MMC_CMD_SEND_STATUS;
148 	cmd.resp_type = MMC_RSP_R1;
149 	if (!mmc_host_is_spi(mmc))
150 		cmd.cmdarg = mmc->rca << 16;
151 
152 	while (1) {
153 		err = mmc_send_cmd(mmc, &cmd, NULL);
154 		if (!err) {
155 			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
156 			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
157 			     MMC_STATE_PRG)
158 				break;
159 			else if (cmd.response[0] & MMC_STATUS_MASK) {
160 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
161 				printf("Status Error: 0x%08X\n",
162 					cmd.response[0]);
163 #endif
164 				return -ECOMM;
165 			}
166 		} else if (--retries < 0)
167 			return err;
168 
169 		if (timeout-- <= 0)
170 			break;
171 
172 		udelay(1000);
173 	}
174 
175 	mmc_trace_state(mmc, &cmd);
176 	if (timeout <= 0) {
177 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
178 		printf("Timeout waiting card ready\n");
179 #endif
180 		return -ETIMEDOUT;
181 	}
182 
183 	return 0;
184 }
185 
186 int mmc_set_blocklen(struct mmc *mmc, int len)
187 {
188 	struct mmc_cmd cmd;
189 
190 	if (mmc->ddr_mode)
191 		return 0;
192 
193 	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
194 	cmd.resp_type = MMC_RSP_R1;
195 	cmd.cmdarg = len;
196 
197 	return mmc_send_cmd(mmc, &cmd, NULL);
198 }
199 
200 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
201 			   lbaint_t blkcnt)
202 {
203 	struct mmc_cmd cmd;
204 	struct mmc_data data;
205 
206 	if (blkcnt > 1)
207 		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
208 	else
209 		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
210 
211 	if (mmc->high_capacity)
212 		cmd.cmdarg = start;
213 	else
214 		cmd.cmdarg = start * mmc->read_bl_len;
215 
216 	cmd.resp_type = MMC_RSP_R1;
217 
218 	data.dest = dst;
219 	data.blocks = blkcnt;
220 	data.blocksize = mmc->read_bl_len;
221 	data.flags = MMC_DATA_READ;
222 
223 	if (mmc_send_cmd(mmc, &cmd, &data))
224 		return 0;
225 
226 	if (blkcnt > 1) {
227 		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
228 		cmd.cmdarg = 0;
229 		cmd.resp_type = MMC_RSP_R1b;
230 		if (mmc_send_cmd(mmc, &cmd, NULL)) {
231 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
232 			printf("mmc fail to send stop cmd\n");
233 #endif
234 			return 0;
235 		}
236 	}
237 
238 	return blkcnt;
239 }
240 
241 #ifdef CONFIG_BLK
242 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
243 #else
244 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
245 		void *dst)
246 #endif
247 {
248 #ifdef CONFIG_BLK
249 	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
250 #endif
251 	int dev_num = block_dev->devnum;
252 	int err;
253 	lbaint_t cur, blocks_todo = blkcnt;
254 
255 	if (blkcnt == 0)
256 		return 0;
257 
258 	struct mmc *mmc = find_mmc_device(dev_num);
259 	if (!mmc)
260 		return 0;
261 
262 	err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
263 	if (err < 0)
264 		return 0;
265 
266 	if ((start + blkcnt) > block_dev->lba) {
267 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
268 		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
269 			start + blkcnt, block_dev->lba);
270 #endif
271 		return 0;
272 	}
273 
274 	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
275 		debug("%s: Failed to set blocklen\n", __func__);
276 		return 0;
277 	}
278 
279 	do {
280 		cur = (blocks_todo > mmc->cfg->b_max) ?
281 			mmc->cfg->b_max : blocks_todo;
282 		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
283 			debug("%s: Failed to read blocks\n", __func__);
284 			return 0;
285 		}
286 		blocks_todo -= cur;
287 		start += cur;
288 		dst += cur * mmc->read_bl_len;
289 	} while (blocks_todo > 0);
290 
291 	return blkcnt;
292 }
293 
294 static int mmc_go_idle(struct mmc *mmc)
295 {
296 	struct mmc_cmd cmd;
297 	int err;
298 
299 	udelay(1000);
300 
301 	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
302 	cmd.cmdarg = 0;
303 	cmd.resp_type = MMC_RSP_NONE;
304 
305 	err = mmc_send_cmd(mmc, &cmd, NULL);
306 
307 	if (err)
308 		return err;
309 
310 	udelay(2000);
311 
312 	return 0;
313 }
314 
315 static int sd_send_op_cond(struct mmc *mmc)
316 {
317 	int timeout = 1000;
318 	int err;
319 	struct mmc_cmd cmd;
320 
321 	while (1) {
322 		cmd.cmdidx = MMC_CMD_APP_CMD;
323 		cmd.resp_type = MMC_RSP_R1;
324 		cmd.cmdarg = 0;
325 
326 		err = mmc_send_cmd(mmc, &cmd, NULL);
327 
328 		if (err)
329 			return err;
330 
331 		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
332 		cmd.resp_type = MMC_RSP_R3;
333 
334 		/*
335 		 * Most cards do not answer if some reserved bits
336 		 * in the ocr are set. However, Some controller
337 		 * can set bit 7 (reserved for low voltages), but
338 		 * how to manage low voltages SD card is not yet
339 		 * specified.
340 		 */
341 		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
342 			(mmc->cfg->voltages & 0xff8000);
343 
344 		if (mmc->version == SD_VERSION_2)
345 			cmd.cmdarg |= OCR_HCS;
346 
347 		err = mmc_send_cmd(mmc, &cmd, NULL);
348 
349 		if (err)
350 			return err;
351 
352 		if (cmd.response[0] & OCR_BUSY)
353 			break;
354 
355 		if (timeout-- <= 0)
356 			return -EOPNOTSUPP;
357 
358 		udelay(1000);
359 	}
360 
361 	if (mmc->version != SD_VERSION_2)
362 		mmc->version = SD_VERSION_1_0;
363 
364 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
365 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
366 		cmd.resp_type = MMC_RSP_R3;
367 		cmd.cmdarg = 0;
368 
369 		err = mmc_send_cmd(mmc, &cmd, NULL);
370 
371 		if (err)
372 			return err;
373 	}
374 
375 	mmc->ocr = cmd.response[0];
376 
377 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
378 	mmc->rca = 0;
379 
380 	return 0;
381 }
382 
383 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
384 {
385 	struct mmc_cmd cmd;
386 	int err;
387 
388 	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
389 	cmd.resp_type = MMC_RSP_R3;
390 	cmd.cmdarg = 0;
391 	if (use_arg && !mmc_host_is_spi(mmc))
392 		cmd.cmdarg = OCR_HCS |
393 			(mmc->cfg->voltages &
394 			(mmc->ocr & OCR_VOLTAGE_MASK)) |
395 			(mmc->ocr & OCR_ACCESS_MODE);
396 
397 	err = mmc_send_cmd(mmc, &cmd, NULL);
398 	if (err)
399 		return err;
400 	mmc->ocr = cmd.response[0];
401 	return 0;
402 }
403 
404 static int mmc_send_op_cond(struct mmc *mmc)
405 {
406 	int err, i;
407 
408 	/* Some cards seem to need this */
409 	mmc_go_idle(mmc);
410 
411  	/* Asking to the card its capabilities */
412 	for (i = 0; i < 2; i++) {
413 		err = mmc_send_op_cond_iter(mmc, i != 0);
414 		if (err)
415 			return err;
416 
417 		/* exit if not busy (flag seems to be inverted) */
418 		if (mmc->ocr & OCR_BUSY)
419 			break;
420 	}
421 	mmc->op_cond_pending = 1;
422 	return 0;
423 }
424 
425 static int mmc_complete_op_cond(struct mmc *mmc)
426 {
427 	struct mmc_cmd cmd;
428 	int timeout = 1000;
429 	uint start;
430 	int err;
431 
432 	mmc->op_cond_pending = 0;
433 	if (!(mmc->ocr & OCR_BUSY)) {
434 		/* Some cards seem to need this */
435 		mmc_go_idle(mmc);
436 
437 		start = get_timer(0);
438 		while (1) {
439 			err = mmc_send_op_cond_iter(mmc, 1);
440 			if (err)
441 				return err;
442 			if (mmc->ocr & OCR_BUSY)
443 				break;
444 			if (get_timer(start) > timeout)
445 				return -EOPNOTSUPP;
446 			udelay(100);
447 		}
448 	}
449 
450 	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
451 		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
452 		cmd.resp_type = MMC_RSP_R3;
453 		cmd.cmdarg = 0;
454 
455 		err = mmc_send_cmd(mmc, &cmd, NULL);
456 
457 		if (err)
458 			return err;
459 
460 		mmc->ocr = cmd.response[0];
461 	}
462 
463 	mmc->version = MMC_VERSION_UNKNOWN;
464 
465 	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
466 	mmc->rca = 1;
467 
468 	return 0;
469 }
470 
471 
472 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
473 {
474 	struct mmc_cmd cmd;
475 	struct mmc_data data;
476 	int err;
477 
478 	/* Get the Card Status Register */
479 	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
480 	cmd.resp_type = MMC_RSP_R1;
481 	cmd.cmdarg = 0;
482 
483 	data.dest = (char *)ext_csd;
484 	data.blocks = 1;
485 	data.blocksize = MMC_MAX_BLOCK_LEN;
486 	data.flags = MMC_DATA_READ;
487 
488 	err = mmc_send_cmd(mmc, &cmd, &data);
489 
490 	return err;
491 }
492 
493 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
494 {
495 	struct mmc_cmd cmd;
496 	int timeout = 1000;
497 	int ret;
498 
499 	cmd.cmdidx = MMC_CMD_SWITCH;
500 	cmd.resp_type = MMC_RSP_R1b;
501 	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
502 				 (index << 16) |
503 				 (value << 8);
504 
505 	ret = mmc_send_cmd(mmc, &cmd, NULL);
506 
507 	/* Waiting for the ready status */
508 	if (!ret)
509 		ret = mmc_send_status(mmc, timeout);
510 
511 	return ret;
512 
513 }
514 
515 static int mmc_change_freq(struct mmc *mmc)
516 {
517 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
518 	char cardtype;
519 	int err;
520 
521 	mmc->card_caps = 0;
522 
523 	if (mmc_host_is_spi(mmc))
524 		return 0;
525 
526 	/* Only version 4 supports high-speed */
527 	if (mmc->version < MMC_VERSION_4)
528 		return 0;
529 
530 	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
531 
532 	err = mmc_send_ext_csd(mmc, ext_csd);
533 
534 	if (err)
535 		return err;
536 
537 	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;
538 
539 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);
540 
541 	if (err)
542 		return err;
543 
544 	/* Now check to see that it worked */
545 	err = mmc_send_ext_csd(mmc, ext_csd);
546 
547 	if (err)
548 		return err;
549 
550 	/* No high-speed support */
551 	if (!ext_csd[EXT_CSD_HS_TIMING])
552 		return 0;
553 
554 	/* High Speed is set, there are two types: 52MHz and 26MHz */
555 	if (cardtype & EXT_CSD_CARD_TYPE_52) {
556 		if (cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
557 			mmc->card_caps |= MMC_MODE_DDR_52MHz;
558 		mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
559 	} else {
560 		mmc->card_caps |= MMC_MODE_HS;
561 	}
562 
563 	return 0;
564 }
565 
566 static int mmc_set_capacity(struct mmc *mmc, int part_num)
567 {
568 	switch (part_num) {
569 	case 0:
570 		mmc->capacity = mmc->capacity_user;
571 		break;
572 	case 1:
573 	case 2:
574 		mmc->capacity = mmc->capacity_boot;
575 		break;
576 	case 3:
577 		mmc->capacity = mmc->capacity_rpmb;
578 		break;
579 	case 4:
580 	case 5:
581 	case 6:
582 	case 7:
583 		mmc->capacity = mmc->capacity_gp[part_num - 4];
584 		break;
585 	default:
586 		return -1;
587 	}
588 
589 	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
590 
591 	return 0;
592 }
593 
594 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
595 {
596 	int ret;
597 
598 	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
599 			 (mmc->part_config & ~PART_ACCESS_MASK)
600 			 | (part_num & PART_ACCESS_MASK));
601 
602 	/*
603 	 * Set the capacity if the switch succeeded or was intended
604 	 * to return to representing the raw device.
605 	 */
606 	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
607 		ret = mmc_set_capacity(mmc, part_num);
608 		mmc_get_blk_desc(mmc)->hwpart = part_num;
609 	}
610 
611 	return ret;
612 }
613 
614 int mmc_hwpart_config(struct mmc *mmc,
615 		      const struct mmc_hwpart_conf *conf,
616 		      enum mmc_hwpart_conf_mode mode)
617 {
618 	u8 part_attrs = 0;
619 	u32 enh_size_mult;
620 	u32 enh_start_addr;
621 	u32 gp_size_mult[4];
622 	u32 max_enh_size_mult;
623 	u32 tot_enh_size_mult = 0;
624 	u8 wr_rel_set;
625 	int i, pidx, err;
626 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
627 
628 	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
629 		return -EINVAL;
630 
631 	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
632 		printf("eMMC >= 4.4 required for enhanced user data area\n");
633 		return -EMEDIUMTYPE;
634 	}
635 
636 	if (!(mmc->part_support & PART_SUPPORT)) {
637 		printf("Card does not support partitioning\n");
638 		return -EMEDIUMTYPE;
639 	}
640 
641 	if (!mmc->hc_wp_grp_size) {
642 		printf("Card does not define HC WP group size\n");
643 		return -EMEDIUMTYPE;
644 	}
645 
646 	/* check partition alignment and total enhanced size */
647 	if (conf->user.enh_size) {
648 		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
649 		    conf->user.enh_start % mmc->hc_wp_grp_size) {
650 			printf("User data enhanced area not HC WP group "
651 			       "size aligned\n");
652 			return -EINVAL;
653 		}
654 		part_attrs |= EXT_CSD_ENH_USR;
655 		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
656 		if (mmc->high_capacity) {
657 			enh_start_addr = conf->user.enh_start;
658 		} else {
659 			enh_start_addr = (conf->user.enh_start << 9);
660 		}
661 	} else {
662 		enh_size_mult = 0;
663 		enh_start_addr = 0;
664 	}
665 	tot_enh_size_mult += enh_size_mult;
666 
667 	for (pidx = 0; pidx < 4; pidx++) {
668 		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
669 			printf("GP%i partition not HC WP group size "
670 			       "aligned\n", pidx+1);
671 			return -EINVAL;
672 		}
673 		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
674 		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
675 			part_attrs |= EXT_CSD_ENH_GP(pidx);
676 			tot_enh_size_mult += gp_size_mult[pidx];
677 		}
678 	}
679 
680 	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
681 		printf("Card does not support enhanced attribute\n");
682 		return -EMEDIUMTYPE;
683 	}
684 
685 	err = mmc_send_ext_csd(mmc, ext_csd);
686 	if (err)
687 		return err;
688 
689 	max_enh_size_mult =
690 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
691 		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
692 		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
693 	if (tot_enh_size_mult > max_enh_size_mult) {
694 		printf("Total enhanced size exceeds maximum (%u > %u)\n",
695 		       tot_enh_size_mult, max_enh_size_mult);
696 		return -EMEDIUMTYPE;
697 	}
698 
699 	/* The default value of EXT_CSD_WR_REL_SET is device
700 	 * dependent, the values can only be changed if the
701 	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
702 	 * changed only once and before partitioning is completed. */
703 	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
704 	if (conf->user.wr_rel_change) {
705 		if (conf->user.wr_rel_set)
706 			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
707 		else
708 			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
709 	}
710 	for (pidx = 0; pidx < 4; pidx++) {
711 		if (conf->gp_part[pidx].wr_rel_change) {
712 			if (conf->gp_part[pidx].wr_rel_set)
713 				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
714 			else
715 				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
716 		}
717 	}
718 
719 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
720 	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
721 		puts("Card does not support host controlled partition write "
722 		     "reliability settings\n");
723 		return -EMEDIUMTYPE;
724 	}
725 
726 	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
727 	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
728 		printf("Card already partitioned\n");
729 		return -EPERM;
730 	}
731 
732 	if (mode == MMC_HWPART_CONF_CHECK)
733 		return 0;
734 
735 	/* Partitioning requires high-capacity size definitions */
736 	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
737 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
738 				 EXT_CSD_ERASE_GROUP_DEF, 1);
739 
740 		if (err)
741 			return err;
742 
743 		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
744 
745 		/* update erase group size to be high-capacity */
746 		mmc->erase_grp_size =
747 			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
748 
749 	}
750 
751 	/* all OK, write the configuration */
752 	for (i = 0; i < 4; i++) {
753 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
754 				 EXT_CSD_ENH_START_ADDR+i,
755 				 (enh_start_addr >> (i*8)) & 0xFF);
756 		if (err)
757 			return err;
758 	}
759 	for (i = 0; i < 3; i++) {
760 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
761 				 EXT_CSD_ENH_SIZE_MULT+i,
762 				 (enh_size_mult >> (i*8)) & 0xFF);
763 		if (err)
764 			return err;
765 	}
766 	for (pidx = 0; pidx < 4; pidx++) {
767 		for (i = 0; i < 3; i++) {
768 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
769 					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
770 					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
771 			if (err)
772 				return err;
773 		}
774 	}
775 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
776 			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
777 	if (err)
778 		return err;
779 
780 	if (mode == MMC_HWPART_CONF_SET)
781 		return 0;
782 
783 	/* The WR_REL_SET is a write-once register but shall be
784 	 * written before setting PART_SETTING_COMPLETED. As it is
785 	 * write-once we can only write it when completing the
786 	 * partitioning. */
787 	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
788 		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
789 				 EXT_CSD_WR_REL_SET, wr_rel_set);
790 		if (err)
791 			return err;
792 	}
793 
794 	/* Setting PART_SETTING_COMPLETED confirms the partition
795 	 * configuration but it only becomes effective after power
796 	 * cycle, so we do not adjust the partition related settings
797 	 * in the mmc struct. */
798 
799 	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
800 			 EXT_CSD_PARTITION_SETTING,
801 			 EXT_CSD_PARTITION_SETTING_COMPLETED);
802 	if (err)
803 		return err;
804 
805 	return 0;
806 }
807 
808 #ifndef CONFIG_DM_MMC_OPS
809 int mmc_getcd(struct mmc *mmc)
810 {
811 	int cd;
812 
813 	cd = board_mmc_getcd(mmc);
814 
815 	if (cd < 0) {
816 		if (mmc->cfg->ops->getcd)
817 			cd = mmc->cfg->ops->getcd(mmc);
818 		else
819 			cd = 1;
820 	}
821 
822 	return cd;
823 }
824 #endif
825 
826 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
827 {
828 	struct mmc_cmd cmd;
829 	struct mmc_data data;
830 
831 	/* Switch the frequency */
832 	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
833 	cmd.resp_type = MMC_RSP_R1;
834 	cmd.cmdarg = (mode << 31) | 0xffffff;
835 	cmd.cmdarg &= ~(0xf << (group * 4));
836 	cmd.cmdarg |= value << (group * 4);
837 
838 	data.dest = (char *)resp;
839 	data.blocksize = 64;
840 	data.blocks = 1;
841 	data.flags = MMC_DATA_READ;
842 
843 	return mmc_send_cmd(mmc, &cmd, &data);
844 }
845 
846 
847 static int sd_change_freq(struct mmc *mmc)
848 {
849 	int err;
850 	struct mmc_cmd cmd;
851 	ALLOC_CACHE_ALIGN_BUFFER(uint, scr, 2);
852 	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
853 	struct mmc_data data;
854 	int timeout;
855 
856 	mmc->card_caps = 0;
857 
858 	if (mmc_host_is_spi(mmc))
859 		return 0;
860 
861 	/* Read the SCR to find out if this card supports higher speeds */
862 	cmd.cmdidx = MMC_CMD_APP_CMD;
863 	cmd.resp_type = MMC_RSP_R1;
864 	cmd.cmdarg = mmc->rca << 16;
865 
866 	err = mmc_send_cmd(mmc, &cmd, NULL);
867 
868 	if (err)
869 		return err;
870 
871 	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
872 	cmd.resp_type = MMC_RSP_R1;
873 	cmd.cmdarg = 0;
874 
875 	timeout = 3;
876 
877 retry_scr:
878 	data.dest = (char *)scr;
879 	data.blocksize = 8;
880 	data.blocks = 1;
881 	data.flags = MMC_DATA_READ;
882 
883 	err = mmc_send_cmd(mmc, &cmd, &data);
884 
885 	if (err) {
886 		if (timeout--)
887 			goto retry_scr;
888 
889 		return err;
890 	}
891 
892 	mmc->scr[0] = __be32_to_cpu(scr[0]);
893 	mmc->scr[1] = __be32_to_cpu(scr[1]);
894 
895 	switch ((mmc->scr[0] >> 24) & 0xf) {
896 	case 0:
897 		mmc->version = SD_VERSION_1_0;
898 		break;
899 	case 1:
900 		mmc->version = SD_VERSION_1_10;
901 		break;
902 	case 2:
903 		mmc->version = SD_VERSION_2;
904 		if ((mmc->scr[0] >> 15) & 0x1)
905 			mmc->version = SD_VERSION_3;
906 		break;
907 	default:
908 		mmc->version = SD_VERSION_1_0;
909 		break;
910 	}
911 
912 	if (mmc->scr[0] & SD_DATA_4BIT)
913 		mmc->card_caps |= MMC_MODE_4BIT;
914 
915 	/* Version 1.0 doesn't support switching */
916 	if (mmc->version == SD_VERSION_1_0)
917 		return 0;
918 
919 	timeout = 4;
920 	while (timeout--) {
921 		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
922 				(u8 *)switch_status);
923 
924 		if (err)
925 			return err;
926 
927 		/* The high-speed function is busy.  Try again */
928 		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
929 			break;
930 	}
931 
932 	/* If high-speed isn't supported, we return */
933 	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
934 		return 0;
935 
936 	/*
937 	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
938 	 * HIGHSPEED mode even if the card support SD_HIGHSPPED.
939 	 * This can avoid furthur problem when the card runs in different
940 	 * mode between the host.
941 	 */
942 	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
943 		(mmc->cfg->host_caps & MMC_MODE_HS)))
944 		return 0;
945 
946 	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);
947 
948 	if (err)
949 		return err;
950 
951 	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
952 		mmc->card_caps |= MMC_MODE_HS;
953 
954 	return 0;
955 }
956 
957 static int sd_read_ssr(struct mmc *mmc)
958 {
959 	int err, i;
960 	struct mmc_cmd cmd;
961 	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
962 	struct mmc_data data;
963 	int timeout = 3;
964 	unsigned int au, eo, et, es;
965 
966 	cmd.cmdidx = MMC_CMD_APP_CMD;
967 	cmd.resp_type = MMC_RSP_R1;
968 	cmd.cmdarg = mmc->rca << 16;
969 
970 	err = mmc_send_cmd(mmc, &cmd, NULL);
971 	if (err)
972 		return err;
973 
974 	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
975 	cmd.resp_type = MMC_RSP_R1;
976 	cmd.cmdarg = 0;
977 
978 retry_ssr:
979 	data.dest = (char *)ssr;
980 	data.blocksize = 64;
981 	data.blocks = 1;
982 	data.flags = MMC_DATA_READ;
983 
984 	err = mmc_send_cmd(mmc, &cmd, &data);
985 	if (err) {
986 		if (timeout--)
987 			goto retry_ssr;
988 
989 		return err;
990 	}
991 
992 	for (i = 0; i < 16; i++)
993 		ssr[i] = be32_to_cpu(ssr[i]);
994 
995 	au = (ssr[2] >> 12) & 0xF;
996 	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
997 		mmc->ssr.au = sd_au_size[au];
998 		es = (ssr[3] >> 24) & 0xFF;
999 		es |= (ssr[2] & 0xFF) << 8;
1000 		et = (ssr[3] >> 18) & 0x3F;
1001 		if (es && et) {
1002 			eo = (ssr[3] >> 16) & 0x3;
1003 			mmc->ssr.erase_timeout = (et * 1000) / es;
1004 			mmc->ssr.erase_offset = eo * 1000;
1005 		}
1006 	} else {
1007 		debug("Invalid Allocation Unit Size.\n");
1008 	}
1009 
1010 	return 0;
1011 }
1012 
1013 /* frequency bases */
1014 /* divided by 10 to be nice to platforms without floating point */
1015 static const int fbase[] = {
1016 	10000,
1017 	100000,
1018 	1000000,
1019 	10000000,
1020 };
1021 
1022 /* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
1023  * to platforms without floating point.
1024  */
1025 static const u8 multipliers[] = {
1026 	0,	/* reserved */
1027 	10,
1028 	12,
1029 	13,
1030 	15,
1031 	20,
1032 	25,
1033 	30,
1034 	35,
1035 	40,
1036 	45,
1037 	50,
1038 	55,
1039 	60,
1040 	70,
1041 	80,
1042 };
1043 
1044 #ifndef CONFIG_DM_MMC_OPS
1045 static void mmc_set_ios(struct mmc *mmc)
1046 {
1047 	if (mmc->cfg->ops->set_ios)
1048 		mmc->cfg->ops->set_ios(mmc);
1049 }
1050 #endif
1051 
1052 void mmc_set_clock(struct mmc *mmc, uint clock)
1053 {
1054 	if (clock > mmc->cfg->f_max)
1055 		clock = mmc->cfg->f_max;
1056 
1057 	if (clock < mmc->cfg->f_min)
1058 		clock = mmc->cfg->f_min;
1059 
1060 	mmc->clock = clock;
1061 
1062 	mmc_set_ios(mmc);
1063 }
1064 
1065 static void mmc_set_bus_width(struct mmc *mmc, uint width)
1066 {
1067 	mmc->bus_width = width;
1068 
1069 	mmc_set_ios(mmc);
1070 }
1071 
1072 static int mmc_startup(struct mmc *mmc)
1073 {
1074 	int err, i;
1075 	uint mult, freq;
1076 	u64 cmult, csize, capacity;
1077 	struct mmc_cmd cmd;
1078 	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1079 	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1080 	int timeout = 1000;
1081 	bool has_parts = false;
1082 	bool part_completed;
1083 	struct blk_desc *bdesc;
1084 
1085 #ifdef CONFIG_MMC_SPI_CRC_ON
1086 	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
1087 		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
1088 		cmd.resp_type = MMC_RSP_R1;
1089 		cmd.cmdarg = 1;
1090 		err = mmc_send_cmd(mmc, &cmd, NULL);
1091 
1092 		if (err)
1093 			return err;
1094 	}
1095 #endif
1096 
1097 	/* Put the Card in Identify Mode */
1098 	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
1099 		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
1100 	cmd.resp_type = MMC_RSP_R2;
1101 	cmd.cmdarg = 0;
1102 
1103 	err = mmc_send_cmd(mmc, &cmd, NULL);
1104 
1105 	if (err)
1106 		return err;
1107 
1108 	memcpy(mmc->cid, cmd.response, 16);
1109 
1110 	/*
1111 	 * For MMC cards, set the Relative Address.
1112 	 * For SD cards, get the Relatvie Address.
1113 	 * This also puts the cards into Standby State
1114 	 */
1115 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1116 		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
1117 		cmd.cmdarg = mmc->rca << 16;
1118 		cmd.resp_type = MMC_RSP_R6;
1119 
1120 		err = mmc_send_cmd(mmc, &cmd, NULL);
1121 
1122 		if (err)
1123 			return err;
1124 
1125 		if (IS_SD(mmc))
1126 			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
1127 	}
1128 
1129 	/* Get the Card-Specific Data */
1130 	cmd.cmdidx = MMC_CMD_SEND_CSD;
1131 	cmd.resp_type = MMC_RSP_R2;
1132 	cmd.cmdarg = mmc->rca << 16;
1133 
1134 	err = mmc_send_cmd(mmc, &cmd, NULL);
1135 
1136 	/* Waiting for the ready status */
1137 	mmc_send_status(mmc, timeout);
1138 
1139 	if (err)
1140 		return err;
1141 
1142 	mmc->csd[0] = cmd.response[0];
1143 	mmc->csd[1] = cmd.response[1];
1144 	mmc->csd[2] = cmd.response[2];
1145 	mmc->csd[3] = cmd.response[3];
1146 
1147 	if (mmc->version == MMC_VERSION_UNKNOWN) {
1148 		int version = (cmd.response[0] >> 26) & 0xf;
1149 
1150 		switch (version) {
1151 		case 0:
1152 			mmc->version = MMC_VERSION_1_2;
1153 			break;
1154 		case 1:
1155 			mmc->version = MMC_VERSION_1_4;
1156 			break;
1157 		case 2:
1158 			mmc->version = MMC_VERSION_2_2;
1159 			break;
1160 		case 3:
1161 			mmc->version = MMC_VERSION_3;
1162 			break;
1163 		case 4:
1164 			mmc->version = MMC_VERSION_4;
1165 			break;
1166 		default:
1167 			mmc->version = MMC_VERSION_1_2;
1168 			break;
1169 		}
1170 	}
1171 
1172 	/* divide frequency by 10, since the mults are 10x bigger */
1173 	freq = fbase[(cmd.response[0] & 0x7)];
1174 	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
1175 
1176 	mmc->tran_speed = freq * mult;
1177 
1178 	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
1179 	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
1180 
1181 	if (IS_SD(mmc))
1182 		mmc->write_bl_len = mmc->read_bl_len;
1183 	else
1184 		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
1185 
1186 	if (mmc->high_capacity) {
1187 		csize = (mmc->csd[1] & 0x3f) << 16
1188 			| (mmc->csd[2] & 0xffff0000) >> 16;
1189 		cmult = 8;
1190 	} else {
1191 		csize = (mmc->csd[1] & 0x3ff) << 2
1192 			| (mmc->csd[2] & 0xc0000000) >> 30;
1193 		cmult = (mmc->csd[2] & 0x00038000) >> 15;
1194 	}
1195 
1196 	mmc->capacity_user = (csize + 1) << (cmult + 2);
1197 	mmc->capacity_user *= mmc->read_bl_len;
1198 	mmc->capacity_boot = 0;
1199 	mmc->capacity_rpmb = 0;
1200 	for (i = 0; i < 4; i++)
1201 		mmc->capacity_gp[i] = 0;
1202 
1203 	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
1204 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1205 
1206 	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
1207 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1208 
1209 	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
1210 		cmd.cmdidx = MMC_CMD_SET_DSR;
1211 		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
1212 		cmd.resp_type = MMC_RSP_NONE;
1213 		if (mmc_send_cmd(mmc, &cmd, NULL))
1214 			printf("MMC: SET_DSR failed\n");
1215 	}
1216 
1217 	/* Select the card, and put it into Transfer Mode */
1218 	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
1219 		cmd.cmdidx = MMC_CMD_SELECT_CARD;
1220 		cmd.resp_type = MMC_RSP_R1;
1221 		cmd.cmdarg = mmc->rca << 16;
1222 		err = mmc_send_cmd(mmc, &cmd, NULL);
1223 
1224 		if (err)
1225 			return err;
1226 	}
1227 
1228 	/*
1229 	 * For SD, its erase group is always one sector
1230 	 */
1231 	mmc->erase_grp_size = 1;
1232 	mmc->part_config = MMCPART_NOAVAILABLE;
1233 	if (!IS_SD(mmc) && (mmc->version >= MMC_VERSION_4)) {
1234 		/* check  ext_csd version and capacity */
1235 		err = mmc_send_ext_csd(mmc, ext_csd);
1236 		if (err)
1237 			return err;
1238 		if (ext_csd[EXT_CSD_REV] >= 2) {
1239 			/*
1240 			 * According to the JEDEC Standard, the value of
1241 			 * ext_csd's capacity is valid if the value is more
1242 			 * than 2GB
1243 			 */
1244 			capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1245 					| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1246 					| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1247 					| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1248 			capacity *= MMC_MAX_BLOCK_LEN;
1249 			if ((capacity >> 20) > 2 * 1024)
1250 				mmc->capacity_user = capacity;
1251 		}
1252 
1253 		switch (ext_csd[EXT_CSD_REV]) {
1254 		case 1:
1255 			mmc->version = MMC_VERSION_4_1;
1256 			break;
1257 		case 2:
1258 			mmc->version = MMC_VERSION_4_2;
1259 			break;
1260 		case 3:
1261 			mmc->version = MMC_VERSION_4_3;
1262 			break;
1263 		case 5:
1264 			mmc->version = MMC_VERSION_4_41;
1265 			break;
1266 		case 6:
1267 			mmc->version = MMC_VERSION_4_5;
1268 			break;
1269 		case 7:
1270 			mmc->version = MMC_VERSION_5_0;
1271 			break;
1272 		case 8:
1273 			mmc->version = MMC_VERSION_5_1;
1274 			break;
1275 		}
1276 
1277 		/* The partition data may be non-zero but it is only
1278 		 * effective if PARTITION_SETTING_COMPLETED is set in
1279 		 * EXT_CSD, so ignore any data if this bit is not set,
1280 		 * except for enabling the high-capacity group size
1281 		 * definition (see below). */
1282 		part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1283 				    EXT_CSD_PARTITION_SETTING_COMPLETED);
1284 
1285 		/* store the partition info of emmc */
1286 		mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
1287 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
1288 		    ext_csd[EXT_CSD_BOOT_MULT])
1289 			mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
1290 		if (part_completed &&
1291 		    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
1292 			mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
1293 
1294 		mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
1295 
1296 		mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
1297 
1298 		for (i = 0; i < 4; i++) {
1299 			int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
1300 			uint mult = (ext_csd[idx + 2] << 16) +
1301 				(ext_csd[idx + 1] << 8) + ext_csd[idx];
1302 			if (mult)
1303 				has_parts = true;
1304 			if (!part_completed)
1305 				continue;
1306 			mmc->capacity_gp[i] = mult;
1307 			mmc->capacity_gp[i] *=
1308 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1309 			mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1310 			mmc->capacity_gp[i] <<= 19;
1311 		}
1312 
1313 		if (part_completed) {
1314 			mmc->enh_user_size =
1315 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+2] << 16) +
1316 				(ext_csd[EXT_CSD_ENH_SIZE_MULT+1] << 8) +
1317 				ext_csd[EXT_CSD_ENH_SIZE_MULT];
1318 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
1319 			mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1320 			mmc->enh_user_size <<= 19;
1321 			mmc->enh_user_start =
1322 				(ext_csd[EXT_CSD_ENH_START_ADDR+3] << 24) +
1323 				(ext_csd[EXT_CSD_ENH_START_ADDR+2] << 16) +
1324 				(ext_csd[EXT_CSD_ENH_START_ADDR+1] << 8) +
1325 				ext_csd[EXT_CSD_ENH_START_ADDR];
1326 			if (mmc->high_capacity)
1327 				mmc->enh_user_start <<= 9;
1328 		}
1329 
1330 		/*
1331 		 * Host needs to enable ERASE_GRP_DEF bit if device is
1332 		 * partitioned. This bit will be lost every time after a reset
1333 		 * or power off. This will affect erase size.
1334 		 */
1335 		if (part_completed)
1336 			has_parts = true;
1337 		if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
1338 		    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
1339 			has_parts = true;
1340 		if (has_parts) {
1341 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1342 				EXT_CSD_ERASE_GROUP_DEF, 1);
1343 
1344 			if (err)
1345 				return err;
1346 			else
1347 				ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1348 		}
1349 
1350 		if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
1351 			/* Read out group size from ext_csd */
1352 			mmc->erase_grp_size =
1353 				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1354 			/*
1355 			 * if high capacity and partition setting completed
1356 			 * SEC_COUNT is valid even if it is smaller than 2 GiB
1357 			 * JEDEC Standard JESD84-B45, 6.2.4
1358 			 */
1359 			if (mmc->high_capacity && part_completed) {
1360 				capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
1361 					(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
1362 					(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
1363 					(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
1364 				capacity *= MMC_MAX_BLOCK_LEN;
1365 				mmc->capacity_user = capacity;
1366 			}
1367 		} else {
1368 			/* Calculate the group size from the csd value. */
1369 			int erase_gsz, erase_gmul;
1370 			erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
1371 			erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
1372 			mmc->erase_grp_size = (erase_gsz + 1)
1373 				* (erase_gmul + 1);
1374 		}
1375 
1376 		mmc->hc_wp_grp_size = 1024
1377 			* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1378 			* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
1379 
1380 		mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1381 	}
1382 
1383 	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
1384 	if (err)
1385 		return err;
1386 
1387 	if (IS_SD(mmc))
1388 		err = sd_change_freq(mmc);
1389 	else
1390 		err = mmc_change_freq(mmc);
1391 
1392 	if (err)
1393 		return err;
1394 
1395 	/* Restrict card's capabilities by what the host can do */
1396 	mmc->card_caps &= mmc->cfg->host_caps;
1397 
1398 	if (IS_SD(mmc)) {
1399 		if (mmc->card_caps & MMC_MODE_4BIT) {
1400 			cmd.cmdidx = MMC_CMD_APP_CMD;
1401 			cmd.resp_type = MMC_RSP_R1;
1402 			cmd.cmdarg = mmc->rca << 16;
1403 
1404 			err = mmc_send_cmd(mmc, &cmd, NULL);
1405 			if (err)
1406 				return err;
1407 
1408 			cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1409 			cmd.resp_type = MMC_RSP_R1;
1410 			cmd.cmdarg = 2;
1411 			err = mmc_send_cmd(mmc, &cmd, NULL);
1412 			if (err)
1413 				return err;
1414 
1415 			mmc_set_bus_width(mmc, 4);
1416 		}
1417 
1418 		err = sd_read_ssr(mmc);
1419 		if (err)
1420 			return err;
1421 
1422 		if (mmc->card_caps & MMC_MODE_HS)
1423 			mmc->tran_speed = 50000000;
1424 		else
1425 			mmc->tran_speed = 25000000;
1426 	} else if (mmc->version >= MMC_VERSION_4) {
1427 		/* Only version 4 of MMC supports wider bus widths */
1428 		int idx;
1429 
1430 		/* An array of possible bus widths in order of preference */
1431 		static unsigned ext_csd_bits[] = {
1432 			EXT_CSD_DDR_BUS_WIDTH_8,
1433 			EXT_CSD_DDR_BUS_WIDTH_4,
1434 			EXT_CSD_BUS_WIDTH_8,
1435 			EXT_CSD_BUS_WIDTH_4,
1436 			EXT_CSD_BUS_WIDTH_1,
1437 		};
1438 
1439 		/* An array to map CSD bus widths to host cap bits */
1440 		static unsigned ext_to_hostcaps[] = {
1441 			[EXT_CSD_DDR_BUS_WIDTH_4] =
1442 				MMC_MODE_DDR_52MHz | MMC_MODE_4BIT,
1443 			[EXT_CSD_DDR_BUS_WIDTH_8] =
1444 				MMC_MODE_DDR_52MHz | MMC_MODE_8BIT,
1445 			[EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT,
1446 			[EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT,
1447 		};
1448 
1449 		/* An array to map chosen bus width to an integer */
1450 		static unsigned widths[] = {
1451 			8, 4, 8, 4, 1,
1452 		};
1453 
1454 		for (idx=0; idx < ARRAY_SIZE(ext_csd_bits); idx++) {
1455 			unsigned int extw = ext_csd_bits[idx];
1456 			unsigned int caps = ext_to_hostcaps[extw];
1457 
1458 			/*
1459 			 * If the bus width is still not changed,
1460 			 * don't try to set the default again.
1461 			 * Otherwise, recover from switch attempts
1462 			 * by switching to 1-bit bus width.
1463 			 */
1464 			if (extw == EXT_CSD_BUS_WIDTH_1 &&
1465 					mmc->bus_width == 1) {
1466 				err = 0;
1467 				break;
1468 			}
1469 
1470 			/*
1471 			 * Check to make sure the card and controller support
1472 			 * these capabilities
1473 			 */
1474 			if ((mmc->card_caps & caps) != caps)
1475 				continue;
1476 
1477 			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1478 					EXT_CSD_BUS_WIDTH, extw);
1479 
1480 			if (err)
1481 				continue;
1482 
1483 			mmc->ddr_mode = (caps & MMC_MODE_DDR_52MHz) ? 1 : 0;
1484 			mmc_set_bus_width(mmc, widths[idx]);
1485 
1486 			err = mmc_send_ext_csd(mmc, test_csd);
1487 
1488 			if (err)
1489 				continue;
1490 
1491 			/* Only compare read only fields */
1492 			if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1493 				== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1494 			    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1495 				== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1496 			    ext_csd[EXT_CSD_REV]
1497 				== test_csd[EXT_CSD_REV] &&
1498 			    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1499 				== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1500 			    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1501 				   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1502 				break;
1503 			else
1504 				err = -EBADMSG;
1505 		}
1506 
1507 		if (err)
1508 			return err;
1509 
1510 		if (mmc->card_caps & MMC_MODE_HS) {
1511 			if (mmc->card_caps & MMC_MODE_HS_52MHz)
1512 				mmc->tran_speed = 52000000;
1513 			else
1514 				mmc->tran_speed = 26000000;
1515 		}
1516 	}
1517 
1518 	mmc_set_clock(mmc, mmc->tran_speed);
1519 
1520 	/* Fix the block length for DDR mode */
1521 	if (mmc->ddr_mode) {
1522 		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
1523 		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
1524 	}
1525 
1526 	/* fill in device description */
1527 	bdesc = mmc_get_blk_desc(mmc);
1528 	bdesc->lun = 0;
1529 	bdesc->hwpart = 0;
1530 	bdesc->type = 0;
1531 	bdesc->blksz = mmc->read_bl_len;
1532 	bdesc->log2blksz = LOG2(bdesc->blksz);
1533 	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1534 #if !defined(CONFIG_SPL_BUILD) || \
1535 		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
1536 		!defined(CONFIG_USE_TINY_PRINTF))
1537 	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
1538 		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
1539 		(mmc->cid[3] >> 16) & 0xffff);
1540 	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
1541 		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
1542 		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
1543 		(mmc->cid[2] >> 24) & 0xff);
1544 	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
1545 		(mmc->cid[2] >> 16) & 0xf);
1546 #else
1547 	bdesc->vendor[0] = 0;
1548 	bdesc->product[0] = 0;
1549 	bdesc->revision[0] = 0;
1550 #endif
1551 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
1552 	part_init(bdesc);
1553 #endif
1554 
1555 	return 0;
1556 }
1557 
1558 static int mmc_send_if_cond(struct mmc *mmc)
1559 {
1560 	struct mmc_cmd cmd;
1561 	int err;
1562 
1563 	cmd.cmdidx = SD_CMD_SEND_IF_COND;
1564 	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
1565 	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
1566 	cmd.resp_type = MMC_RSP_R7;
1567 
1568 	err = mmc_send_cmd(mmc, &cmd, NULL);
1569 
1570 	if (err)
1571 		return err;
1572 
1573 	if ((cmd.response[0] & 0xff) != 0xaa)
1574 		return -EOPNOTSUPP;
1575 	else
1576 		mmc->version = SD_VERSION_2;
1577 
1578 	return 0;
1579 }
1580 
1581 /* board-specific MMC power initializations. */
1582 __weak void board_mmc_power_init(void)
1583 {
1584 }
1585 
1586 static int mmc_power_init(struct mmc *mmc)
1587 {
1588 	board_mmc_power_init();
1589 
1590 #if defined(CONFIG_DM_MMC) && defined(CONFIG_DM_REGULATOR) && \
1591 	!defined(CONFIG_SPL_BUILD)
1592 	struct udevice *vmmc_supply;
1593 	int ret;
1594 
1595 	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
1596 					  &vmmc_supply);
1597 	if (ret) {
1598 		debug("%s: No vmmc supply\n", mmc->dev->name);
1599 		return 0;
1600 	}
1601 
1602 	ret = regulator_set_enable(vmmc_supply, true);
1603 	if (ret) {
1604 		puts("Error enabling VMMC supply\n");
1605 		return ret;
1606 	}
1607 #endif
1608 	return 0;
1609 }
1610 
1611 int mmc_start_init(struct mmc *mmc)
1612 {
1613 	bool no_card;
1614 	int err;
1615 
1616 	/* we pretend there's no card when init is NULL */
1617 	no_card = mmc_getcd(mmc) == 0;
1618 #ifndef CONFIG_DM_MMC_OPS
1619 	no_card = no_card || (mmc->cfg->ops->init == NULL);
1620 #endif
1621 	if (no_card) {
1622 		mmc->has_init = 0;
1623 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1624 		printf("MMC: no card present\n");
1625 #endif
1626 		return -ENOMEDIUM;
1627 	}
1628 
1629 	if (mmc->has_init)
1630 		return 0;
1631 
1632 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
1633 	mmc_adapter_card_type_ident();
1634 #endif
1635 	err = mmc_power_init(mmc);
1636 	if (err)
1637 		return err;
1638 
1639 #ifdef CONFIG_DM_MMC_OPS
1640 	/* The device has already been probed ready for use */
1641 #else
1642 	/* made sure it's not NULL earlier */
1643 	err = mmc->cfg->ops->init(mmc);
1644 	if (err)
1645 		return err;
1646 #endif
1647 	mmc->ddr_mode = 0;
1648 	mmc_set_bus_width(mmc, 1);
1649 	mmc_set_clock(mmc, 1);
1650 
1651 	/* Reset the Card */
1652 	err = mmc_go_idle(mmc);
1653 
1654 	if (err)
1655 		return err;
1656 
1657 	/* The internal partition reset to user partition(0) at every CMD0*/
1658 	mmc_get_blk_desc(mmc)->hwpart = 0;
1659 
1660 	/* Test for SD version 2 */
1661 	err = mmc_send_if_cond(mmc);
1662 
1663 	/* Now try to get the SD card's operating condition */
1664 	err = sd_send_op_cond(mmc);
1665 
1666 	/* If the command timed out, we check for an MMC card */
1667 	if (err == -ETIMEDOUT) {
1668 		err = mmc_send_op_cond(mmc);
1669 
1670 		if (err) {
1671 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
1672 			printf("Card did not respond to voltage select!\n");
1673 #endif
1674 			return -EOPNOTSUPP;
1675 		}
1676 	}
1677 
1678 	if (!err)
1679 		mmc->init_in_progress = 1;
1680 
1681 	return err;
1682 }
1683 
1684 static int mmc_complete_init(struct mmc *mmc)
1685 {
1686 	int err = 0;
1687 
1688 	mmc->init_in_progress = 0;
1689 	if (mmc->op_cond_pending)
1690 		err = mmc_complete_op_cond(mmc);
1691 
1692 	if (!err)
1693 		err = mmc_startup(mmc);
1694 	if (err)
1695 		mmc->has_init = 0;
1696 	else
1697 		mmc->has_init = 1;
1698 	return err;
1699 }
1700 
1701 int mmc_init(struct mmc *mmc)
1702 {
1703 	int err = 0;
1704 	unsigned start;
1705 #ifdef CONFIG_DM_MMC
1706 	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
1707 
1708 	upriv->mmc = mmc;
1709 #endif
1710 	if (mmc->has_init)
1711 		return 0;
1712 
1713 	start = get_timer(0);
1714 
1715 	if (!mmc->init_in_progress)
1716 		err = mmc_start_init(mmc);
1717 
1718 	if (!err)
1719 		err = mmc_complete_init(mmc);
1720 	debug("%s: %d, time %lu\n", __func__, err, get_timer(start));
1721 	return err;
1722 }
1723 
1724 int mmc_set_dsr(struct mmc *mmc, u16 val)
1725 {
1726 	mmc->dsr = val;
1727 	return 0;
1728 }
1729 
1730 /* CPU-specific MMC initializations */
1731 __weak int cpu_mmc_init(bd_t *bis)
1732 {
1733 	return -1;
1734 }
1735 
1736 /* board-specific MMC initializations. */
1737 __weak int board_mmc_init(bd_t *bis)
1738 {
1739 	return -1;
1740 }
1741 
1742 void mmc_set_preinit(struct mmc *mmc, int preinit)
1743 {
1744 	mmc->preinit = preinit;
1745 }
1746 
1747 #if defined(CONFIG_DM_MMC) && defined(CONFIG_SPL_BUILD)
1748 static int mmc_probe(bd_t *bis)
1749 {
1750 	return 0;
1751 }
1752 #elif defined(CONFIG_DM_MMC)
1753 static int mmc_probe(bd_t *bis)
1754 {
1755 	int ret, i;
1756 	struct uclass *uc;
1757 	struct udevice *dev;
1758 
1759 	ret = uclass_get(UCLASS_MMC, &uc);
1760 	if (ret)
1761 		return ret;
1762 
1763 	/*
1764 	 * Try to add them in sequence order. Really with driver model we
1765 	 * should allow holes, but the current MMC list does not allow that.
1766 	 * So if we request 0, 1, 3 we will get 0, 1, 2.
1767 	 */
1768 	for (i = 0; ; i++) {
1769 		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
1770 		if (ret == -ENODEV)
1771 			break;
1772 	}
1773 	uclass_foreach_dev(dev, uc) {
1774 		ret = device_probe(dev);
1775 		if (ret)
1776 			printf("%s - probe failed: %d\n", dev->name, ret);
1777 	}
1778 
1779 	return 0;
1780 }
1781 #else
1782 static int mmc_probe(bd_t *bis)
1783 {
1784 	if (board_mmc_init(bis) < 0)
1785 		cpu_mmc_init(bis);
1786 
1787 	return 0;
1788 }
1789 #endif
1790 
1791 int mmc_initialize(bd_t *bis)
1792 {
1793 	static int initialized = 0;
1794 	int ret;
1795 	if (initialized)	/* Avoid initializing mmc multiple times */
1796 		return 0;
1797 	initialized = 1;
1798 
1799 #ifndef CONFIG_BLK
1800 	mmc_list_init();
1801 #endif
1802 	ret = mmc_probe(bis);
1803 	if (ret)
1804 		return ret;
1805 
1806 #ifndef CONFIG_SPL_BUILD
1807 	print_mmc_devices(',');
1808 #endif
1809 
1810 	mmc_do_preinit();
1811 	return 0;
1812 }
1813