1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 #if CONFIG_IS_ENABLED(MMC_TINY) 34 static struct mmc mmc_static; 35 struct mmc *find_mmc_device(int dev_num) 36 { 37 return &mmc_static; 38 } 39 40 void mmc_do_preinit(void) 41 { 42 struct mmc *m = &mmc_static; 43 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 44 mmc_set_preinit(m, 1); 45 #endif 46 if (m->preinit) 47 mmc_start_init(m); 48 } 49 50 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 51 { 52 return &mmc->block_dev; 53 } 54 #endif 55 56 #if !CONFIG_IS_ENABLED(DM_MMC) 57 __weak int board_mmc_getwp(struct mmc *mmc) 58 { 59 return -1; 60 } 61 62 int mmc_getwp(struct mmc *mmc) 63 { 64 int wp; 65 66 wp = board_mmc_getwp(mmc); 67 68 if (wp < 0) { 69 if (mmc->cfg->ops->getwp) 70 wp = mmc->cfg->ops->getwp(mmc); 71 else 72 wp = 0; 73 } 74 75 return wp; 76 } 77 78 __weak int board_mmc_getcd(struct mmc *mmc) 79 { 80 return -1; 81 } 82 #endif 83 84 #ifdef CONFIG_MMC_TRACE 85 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 86 { 87 printf("CMD_SEND:%d\n", cmd->cmdidx); 88 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 89 } 90 91 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 92 { 93 int i; 94 u8 *ptr; 95 96 if (ret) { 97 
printf("\t\tRET\t\t\t %d\n", ret); 98 } else { 99 switch (cmd->resp_type) { 100 case MMC_RSP_NONE: 101 printf("\t\tMMC_RSP_NONE\n"); 102 break; 103 case MMC_RSP_R1: 104 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 105 cmd->response[0]); 106 break; 107 case MMC_RSP_R1b: 108 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 109 cmd->response[0]); 110 break; 111 case MMC_RSP_R2: 112 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 113 cmd->response[0]); 114 printf("\t\t \t\t 0x%08X \n", 115 cmd->response[1]); 116 printf("\t\t \t\t 0x%08X \n", 117 cmd->response[2]); 118 printf("\t\t \t\t 0x%08X \n", 119 cmd->response[3]); 120 printf("\n"); 121 printf("\t\t\t\t\tDUMPING DATA\n"); 122 for (i = 0; i < 4; i++) { 123 int j; 124 printf("\t\t\t\t\t%03d - ", i*4); 125 ptr = (u8 *)&cmd->response[i]; 126 ptr += 3; 127 for (j = 0; j < 4; j++) 128 printf("%02X ", *ptr--); 129 printf("\n"); 130 } 131 break; 132 case MMC_RSP_R3: 133 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 134 cmd->response[0]); 135 break; 136 default: 137 printf("\t\tERROR MMC rsp not supported\n"); 138 break; 139 } 140 } 141 } 142 143 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 144 { 145 int status; 146 147 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 148 printf("CURR STATE:%d\n", status); 149 } 150 #endif 151 152 #if !CONFIG_IS_ENABLED(DM_MMC) 153 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 154 { 155 int ret; 156 157 mmmc_trace_before_send(mmc, cmd); 158 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 159 mmmc_trace_after_send(mmc, cmd, ret); 160 161 return ret; 162 } 163 #endif 164 165 int mmc_send_status(struct mmc *mmc, int timeout) 166 { 167 struct mmc_cmd cmd; 168 int err, retries = 5; 169 170 cmd.cmdidx = MMC_CMD_SEND_STATUS; 171 cmd.resp_type = MMC_RSP_R1; 172 if (!mmc_host_is_spi(mmc)) 173 cmd.cmdarg = mmc->rca << 16; 174 175 while (1) { 176 err = mmc_send_cmd(mmc, &cmd, NULL); 177 if (!err) { 178 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) && 179 
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			    MMC_STATE_PRG)
				break;
			else if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				printf("Status Error: 0x%08X\n",
					cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}

/* CMD16: set the block length; illegal (and skipped) in DDR mode. */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	return mmc_send_cmd(mmc, &cmd, NULL);
}

/*
 * Read @blkcnt blocks starting at @start into @dst.
 * Returns the number of blocks read, or 0 on any error.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* High-capacity cards address by block number, others by byte */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	/* Multi-block reads must be terminated with CMD12 */
	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}

/* Block-device read entry point; returns blocks read, or 0 on error. */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* Make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
			start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	/* Split the transfer into chunks no larger than the host allows */
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

/* CMD0: reset the card to idle state. */
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

/* SD power-up: loop ACMD41 until the card reports it left busy state. */
static int sd_send_op_cond(struct mmc *mmc)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ?
			0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}

/* One CMD1 iteration; @use_arg selects whether to send our OCR wishes. */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

/* Start eMMC CMD1 negotiation; completed later by mmc_complete_op_cond(). */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}

/* Finish the CMD1 negotiation started by mmc_send_op_cond(). */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	uint start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}


/* CMD8: read the 512-byte EXT_CSD register block into @ext_csd. */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Get the Card Status Register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}

/* CMD6: write one EXT_CSD byte, then wait for the card to go ready. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		(index << 16) |
		(value << 8);

	while (retries > 0) {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		/* Waiting for the ready status */
		if (!ret) {
			ret = mmc_send_status(mmc, timeout);
			return ret;
		}

		retries--;
	}

	return ret;

}

/* Probe eMMC high-speed capability and switch HS timing if supported. */
static int mmc_change_freq(struct mmc *mmc)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
	char cardtype;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);

	if (err)
		return err;

	/* Now check to see that it worked */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* No high-speed support */
	if (!ext_csd[EXT_CSD_HS_TIMING])
		return 0;

	/* High Speed is set, there are two types: 52MHz and 26MHz */
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
	} else {
		mmc->card_caps |= MMC_MODE_HS;
	}

	return 0;
}

/* Update mmc->capacity (and blk lba) for the selected hw partition. */
static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

/* Select eMMC hardware partition @part_num via EXT_CSD PART_CONF. */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

/*
 * Check or apply an eMMC hardware partition configuration.
 * @mode selects CHECK (dry run), SET, or COMPLETE (write-once commit:
 * the card's partitioning cannot be changed again afterwards).
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		printf("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		printf("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		printf("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			printf("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			printf("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size /
			mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		printf("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 24-bit little-endian field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		printf("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed.
	 */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		printf("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;	/* no way to tell; assume present */
	}

	return cd;
}
#endif

/* SD CMD6: check/switch function group @group to @value; 64-byte status. */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}


/* Read the SD SCR, derive the spec version/caps, and try high-speed. */
static int sd_change_freq(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data
	data;
	int timeout;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field of the SCR maps to the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (!(__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED))
		return 0;

	/*
	 * If the host doesn't support SD_HIGHSPEED, do not switch card to
	 * HIGHSPEED mode even if the card support SD_HIGHSPEED.
	 * This can avoid further problem when the card runs in different
	 * mode between the host.
	 */
	if (!((mmc->cfg->host_caps & MMC_MODE_HS_52MHz) &&
		(mmc->cfg->host_caps & MMC_MODE_HS)))
		return 0;

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status);

	if (err)
		return err;

	if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) == 0x01000000)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

/* ACMD13: read the 64-byte SD status and extract AU/erase timing info. */
static int sd_read_ssr(struct mmc *mmc)
{
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	int timeout = 3;
	unsigned int au, eo, et, es;

	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

retry_ssr:
	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err) {
		if (timeout--)
			goto retry_ssr;

		return err;
	}

	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}

/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};

#if !CONFIG_IS_ENABLED(DM_MMC)
static void mmc_set_ios(struct mmc *mmc)
{
	if (mmc->cfg->ops->set_ios)
		mmc->cfg->ops->set_ios(mmc);
}
#endif

/* Clamp @clock to the host's [f_min, f_max] window and apply it. */
void mmc_set_clock(struct mmc *mmc, uint clock)
{
	if (clock > mmc->cfg->f_max)
		clock = mmc->cfg->f_max;

	if (clock < mmc->cfg->f_min)
		clock = mmc->cfg->f_min;

	mmc->clock = clock;

	mmc_set_ios(mmc);
}

static void mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	mmc_set_ios(mmc);
}

/* Negotiate SD bus width (ACMD6) and transfer speed after SCR readout. */
static int sd_select_bus_freq_width(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;

	err = sd_change_freq(mmc);
	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	if (mmc->card_caps & MMC_MODE_4BIT) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;

		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 2;	/* argument 2 selects 4-bit bus */
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;

		mmc_set_bus_width(mmc, 4);
	}

	err = sd_read_ssr(mmc);
	if (err)
		return err;

	if (mmc->card_caps & MMC_MODE_HS)
		mmc->tran_speed = 50000000;
	else
		mmc->tran_speed = 25000000;

	return 0;
}

/*
 * Read and compare the part of ext csd that is constant.
 * This can be used to check that the transfer is working
 * as expected.
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
{
	int err;
	const u8 *ext_csd = mmc->ext_csd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, test_csd);
	if (err)
		return err;

	/* Only compare read only fields */
	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
	    ext_csd[EXT_CSD_REV]
		== test_csd[EXT_CSD_REV] &&
	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
		return 0;

	return -EBADMSG;
}

/* Pick the widest bus width both card and host support, verifying each. */
static int mmc_select_bus_freq_width(struct mmc *mmc)
{
	/* An array of possible bus widths in order of preference */
	static const unsigned int ext_csd_bits[] = {
		EXT_CSD_DDR_BUS_WIDTH_8,
		EXT_CSD_DDR_BUS_WIDTH_4,
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
		EXT_CSD_BUS_WIDTH_1,
	};
	/* An array to map CSD bus widths to host cap bits */
	static const unsigned int ext_to_hostcaps[] = {
		[EXT_CSD_DDR_BUS_WIDTH_4] =
			MMC_MODE_DDR_52MHz | MMC_MODE_4BIT,
		[EXT_CSD_DDR_BUS_WIDTH_8] =
			MMC_MODE_DDR_52MHz | MMC_MODE_8BIT,
		[EXT_CSD_BUS_WIDTH_4] = MMC_MODE_4BIT,
		[EXT_CSD_BUS_WIDTH_8] = MMC_MODE_8BIT,
	};
	/* An array to map chosen bus width to an integer */
	static const unsigned int widths[] = {
		8, 4, 8, 4, 1,
	};
	int err;
	int idx;

	err = mmc_change_freq(mmc);
	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= mmc->cfg->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this
should enver happen */ 1219 return -ENOTSUPP; 1220 } 1221 1222 for (idx = 0; idx < ARRAY_SIZE(ext_csd_bits); idx++) { 1223 unsigned int extw = ext_csd_bits[idx]; 1224 unsigned int caps = ext_to_hostcaps[extw]; 1225 /* 1226 * If the bus width is still not changed, 1227 * don't try to set the default again. 1228 * Otherwise, recover from switch attempts 1229 * by switching to 1-bit bus width. 1230 */ 1231 if (extw == EXT_CSD_BUS_WIDTH_1 && 1232 mmc->bus_width == 1) { 1233 err = 0; 1234 break; 1235 } 1236 1237 /* 1238 * Check to make sure the card and controller support 1239 * these capabilities 1240 */ 1241 if ((mmc->card_caps & caps) != caps) 1242 continue; 1243 1244 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1245 EXT_CSD_BUS_WIDTH, extw); 1246 1247 if (err) 1248 continue; 1249 1250 mmc->ddr_mode = (caps & MMC_MODE_DDR_52MHz) ? 1 : 0; 1251 mmc_set_bus_width(mmc, widths[idx]); 1252 1253 err = mmc_read_and_compare_ext_csd(mmc); 1254 if (!err) 1255 break; 1256 } 1257 1258 if (err) 1259 return err; 1260 1261 if (mmc->card_caps & MMC_MODE_HS) { 1262 if (mmc->card_caps & MMC_MODE_HS_52MHz) 1263 mmc->tran_speed = 52000000; 1264 else 1265 mmc->tran_speed = 26000000; 1266 } 1267 1268 return err; 1269 } 1270 1271 static int mmc_startup_v4(struct mmc *mmc) 1272 { 1273 int err, i; 1274 u64 capacity; 1275 bool has_parts = false; 1276 bool part_completed; 1277 u8 *ext_csd; 1278 1279 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4)) 1280 return 0; 1281 1282 ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN); 1283 if (!ext_csd) 1284 return -ENOMEM; 1285 1286 mmc->ext_csd = ext_csd; 1287 1288 /* check ext_csd version and capacity */ 1289 err = mmc_send_ext_csd(mmc, ext_csd); 1290 if (err) 1291 return err; 1292 if (ext_csd[EXT_CSD_REV] >= 2) { 1293 /* 1294 * According to the JEDEC Standard, the value of 1295 * ext_csd's capacity is valid if the value is more 1296 * than 2GB 1297 */ 1298 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 1299 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8 1300 | 
ext_csd[EXT_CSD_SEC_CNT + 2] << 16 1301 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 1302 capacity *= MMC_MAX_BLOCK_LEN; 1303 if ((capacity >> 20) > 2 * 1024) 1304 mmc->capacity_user = capacity; 1305 } 1306 1307 switch (ext_csd[EXT_CSD_REV]) { 1308 case 1: 1309 mmc->version = MMC_VERSION_4_1; 1310 break; 1311 case 2: 1312 mmc->version = MMC_VERSION_4_2; 1313 break; 1314 case 3: 1315 mmc->version = MMC_VERSION_4_3; 1316 break; 1317 case 5: 1318 mmc->version = MMC_VERSION_4_41; 1319 break; 1320 case 6: 1321 mmc->version = MMC_VERSION_4_5; 1322 break; 1323 case 7: 1324 mmc->version = MMC_VERSION_5_0; 1325 break; 1326 case 8: 1327 mmc->version = MMC_VERSION_5_1; 1328 break; 1329 } 1330 1331 /* The partition data may be non-zero but it is only 1332 * effective if PARTITION_SETTING_COMPLETED is set in 1333 * EXT_CSD, so ignore any data if this bit is not set, 1334 * except for enabling the high-capacity group size 1335 * definition (see below). 1336 */ 1337 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1338 EXT_CSD_PARTITION_SETTING_COMPLETED); 1339 1340 /* store the partition info of emmc */ 1341 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1342 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1343 ext_csd[EXT_CSD_BOOT_MULT]) 1344 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1345 if (part_completed && 1346 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1347 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1348 1349 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1350 1351 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1352 1353 for (i = 0; i < 4; i++) { 1354 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1355 uint mult = (ext_csd[idx + 2] << 16) + 1356 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1357 if (mult) 1358 has_parts = true; 1359 if (!part_completed) 1360 continue; 1361 mmc->capacity_gp[i] = mult; 1362 mmc->capacity_gp[i] *= 1363 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1364 mmc->capacity_gp[i] *= 
ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1365 mmc->capacity_gp[i] <<= 19; 1366 } 1367 1368 if (part_completed) { 1369 mmc->enh_user_size = 1370 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) + 1371 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) + 1372 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1373 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1374 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1375 mmc->enh_user_size <<= 19; 1376 mmc->enh_user_start = 1377 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) + 1378 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) + 1379 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) + 1380 ext_csd[EXT_CSD_ENH_START_ADDR]; 1381 if (mmc->high_capacity) 1382 mmc->enh_user_start <<= 9; 1383 } 1384 1385 /* 1386 * Host needs to enable ERASE_GRP_DEF bit if device is 1387 * partitioned. This bit will be lost every time after a reset 1388 * or power off. This will affect erase size. 1389 */ 1390 if (part_completed) 1391 has_parts = true; 1392 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 1393 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 1394 has_parts = true; 1395 if (has_parts) { 1396 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1397 EXT_CSD_ERASE_GROUP_DEF, 1); 1398 1399 if (err) 1400 return err; 1401 1402 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1403 } 1404 1405 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 1406 /* Read out group size from ext_csd */ 1407 mmc->erase_grp_size = 1408 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1409 /* 1410 * if high capacity and partition setting completed 1411 * SEC_COUNT is valid even if it is smaller than 2 GiB 1412 * JEDEC Standard JESD84-B45, 6.2.4 1413 */ 1414 if (mmc->high_capacity && part_completed) { 1415 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 1416 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 1417 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 1418 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 1419 capacity *= MMC_MAX_BLOCK_LEN; 1420 mmc->capacity_user = capacity; 1421 } 1422 } else { 1423 /* Calculate the group 
size from the csd value. */ 1424 int erase_gsz, erase_gmul; 1425 1426 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10; 1427 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5; 1428 mmc->erase_grp_size = (erase_gsz + 1) 1429 * (erase_gmul + 1); 1430 } 1431 1432 mmc->hc_wp_grp_size = 1024 1433 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1434 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1435 1436 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 1437 1438 return 0; 1439 } 1440 1441 static int mmc_startup(struct mmc *mmc) 1442 { 1443 int err, i; 1444 uint mult, freq; 1445 u64 cmult, csize; 1446 struct mmc_cmd cmd; 1447 struct blk_desc *bdesc; 1448 1449 #ifdef CONFIG_MMC_SPI_CRC_ON 1450 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 1451 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 1452 cmd.resp_type = MMC_RSP_R1; 1453 cmd.cmdarg = 1; 1454 err = mmc_send_cmd(mmc, &cmd, NULL); 1455 1456 if (err) 1457 return err; 1458 } 1459 #endif 1460 1461 /* Put the Card in Identify Mode */ 1462 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID : 1463 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 1464 cmd.resp_type = MMC_RSP_R2; 1465 cmd.cmdarg = 0; 1466 1467 err = mmc_send_cmd(mmc, &cmd, NULL); 1468 1469 if (err) 1470 return err; 1471 1472 memcpy(mmc->cid, cmd.response, 16); 1473 1474 /* 1475 * For MMC cards, set the Relative Address. 1476 * For SD cards, get the Relatvie Address. 
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD assigns the RCA; for MMC we provided it in cmdarg */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* Derive the MMC spec version from CSD bits 125:122 (SPEC_VERS) */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->tran_speed = freq * mult;

	/* DSR_IMP flag and READ_BL_LEN come straight from the CSD */
	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	/* C_SIZE / C_SIZE_MULT layout differs between standard and
	 * high-capacity (CSD v2) cards.
	 */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	/* Clamp block lengths to what the stack supports */
	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;

	/* Program the DSR if the card implements one and a value was set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			printf("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
	mmc->erase_grp_size = 1;
	mmc->part_config = MMCPART_NOAVAILABLE;

	/* eMMC v4+ only; no-op for SD (may overwrite the defaults above) */
	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

	if (IS_SD(mmc))
		err = sd_select_bus_freq_width(mmc);
	else
		err = mmc_select_bus_freq_width(mmc);

	if (err)
		return err;

	mmc_set_clock(mmc, mmc->tran_speed);

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!defined(CONFIG_USE_TINY_PRINTF))
	/* Human-readable strings built from the CID register */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
	part_init(bdesc);
#endif

	return 0;
}

/*
 * Send SD CMD8 (SEND_IF_COND) to probe for an SD version 2.x card.
 * Marks the card SD_VERSION_2 when it echoes the 0xaa check pattern.
 *
 * Return: 0 on success, -EOPNOTSUPP if the pattern is not echoed,
 * or the mmc_send_cmd() error (a timeout here simply means the card
 * predates SD 2.0).
 */
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations.
 */
__weak void board_mmc_power_init(void)
{
}
#endif

/*
 * Enable the card's power supplies.  With driver model, look up the
 * vmmc/vqmmc regulators from the device tree and enable vmmc; without
 * driver model, fall back to the board_mmc_power_init() hook.
 *
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	/* Missing supplies are not fatal; just note them */
	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		debug("%s: No vqmmc supply\n", mmc->dev->name);

	if (mmc->vmmc_supply) {
		ret = regulator_set_enable(mmc->vmmc_supply, true);
		if (ret) {
			puts("Error enabling VMMC supply\n");
			return ret;
		}
	}
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * First phase of card init: power up, reset the card (CMD0) and start
 * the SD/MMC operating-condition negotiation.  On success sets
 * init_in_progress; mmc_complete_init() finishes the job.
 *
 * Return: 0 on success, -ENOMEDIUM if no card is present, or a
 * negative error code.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err;

	/* we pretend there's no card when init is NULL */
	no_card = mmc_getcd(mmc) == 0;
#if !CONFIG_IS_ENABLED(DM_MMC)
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		printf("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	/* Start identification at 1-bit bus width and minimum clock */
	mmc->ddr_mode = 0;
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 1);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0*/
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2; the result is deliberately not checked
	 * here - an error just means the card predates SD 2.0.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc);

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			printf("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

/*
 * Second phase of card init: finish any pending operating-condition
 * negotiation and run the full startup sequence.  Updates has_init.
 */
static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

/*
 * Fully initialize a card (both phases).  Idempotent: returns 0
 * immediately if the card was already initialized.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused unsigned start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		printf("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

/* Record the DSR value to be programmed during mmc_startup() */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* board-specific MMC initializations.
 */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

/* Request that this device be initialized from mmc_do_preinit() */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD)
/* SPL with driver model: devices are probed on demand, nothing to do */
static int mmc_probe(bd_t *bis)
{
	return 0;
}
#elif CONFIG_IS_ENABLED(DM_MMC)
/* Driver model: probe every device in the MMC uclass */
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			printf("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy: board hook first, CPU hook as fallback */
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

/*
 * One-time MMC subsystem setup: register/probe all controllers, list
 * the devices (non-SPL) and run any requested pre-initializations.
 *
 * Return: 0 on success or the mmc_probe() error.
 */
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;
	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Permanently enable manual background operations (BKOPS_EN) on an
 * eMMC device, if supported and not already enabled.
 *
 * Return: 0 on success (or already enabled), -EMEDIUMTYPE if the
 * device lacks BKOPS support, or a negative error code.
 */
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	/* NOTE: BKOPS_EN is a one-time programmable EXT_CSD field */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif