1 /* 2 * Copyright 2008, Freescale Semiconductor, Inc 3 * Andy Fleming 4 * 5 * Based vaguely on the Linux code 6 * 7 * SPDX-License-Identifier: GPL-2.0+ 8 */ 9 10 #include <config.h> 11 #include <common.h> 12 #include <command.h> 13 #include <dm.h> 14 #include <dm/device-internal.h> 15 #include <errno.h> 16 #include <mmc.h> 17 #include <part.h> 18 #include <power/regulator.h> 19 #include <malloc.h> 20 #include <memalign.h> 21 #include <linux/list.h> 22 #include <div64.h> 23 #include "mmc_private.h" 24 25 static const unsigned int sd_au_size[] = { 26 0, SZ_16K / 512, SZ_32K / 512, 27 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 28 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 29 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 30 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, 31 }; 32 33 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage); 34 static int mmc_power_cycle(struct mmc *mmc); 35 36 #if CONFIG_IS_ENABLED(MMC_TINY) 37 static struct mmc mmc_static; 38 struct mmc *find_mmc_device(int dev_num) 39 { 40 return &mmc_static; 41 } 42 43 void mmc_do_preinit(void) 44 { 45 struct mmc *m = &mmc_static; 46 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 47 mmc_set_preinit(m, 1); 48 #endif 49 if (m->preinit) 50 mmc_start_init(m); 51 } 52 53 struct blk_desc *mmc_get_blk_desc(struct mmc *mmc) 54 { 55 return &mmc->block_dev; 56 } 57 #endif 58 59 #if !CONFIG_IS_ENABLED(DM_MMC) 60 __weak int board_mmc_getwp(struct mmc *mmc) 61 { 62 return -1; 63 } 64 65 int mmc_getwp(struct mmc *mmc) 66 { 67 int wp; 68 69 wp = board_mmc_getwp(mmc); 70 71 if (wp < 0) { 72 if (mmc->cfg->ops->getwp) 73 wp = mmc->cfg->ops->getwp(mmc); 74 else 75 wp = 0; 76 } 77 78 return wp; 79 } 80 81 __weak int board_mmc_getcd(struct mmc *mmc) 82 { 83 return -1; 84 } 85 #endif 86 87 #ifdef CONFIG_MMC_TRACE 88 void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd) 89 { 90 printf("CMD_SEND:%d\n", cmd->cmdidx); 91 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg); 92 } 93 94 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret) 95 { 96 int i; 97 u8 *ptr; 98 99 if (ret) { 100 printf("\t\tRET\t\t\t %d\n", ret); 101 } else { 102 switch (cmd->resp_type) { 103 case MMC_RSP_NONE: 104 printf("\t\tMMC_RSP_NONE\n"); 105 break; 106 case MMC_RSP_R1: 107 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n", 108 cmd->response[0]); 109 break; 110 case MMC_RSP_R1b: 111 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n", 112 cmd->response[0]); 113 break; 114 case MMC_RSP_R2: 115 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n", 116 cmd->response[0]); 117 printf("\t\t \t\t 0x%08X \n", 118 cmd->response[1]); 119 printf("\t\t \t\t 0x%08X \n", 120 cmd->response[2]); 121 printf("\t\t \t\t 0x%08X \n", 122 cmd->response[3]); 123 printf("\n"); 124 printf("\t\t\t\t\tDUMPING DATA\n"); 125 for (i = 0; i < 4; i++) { 126 int j; 127 printf("\t\t\t\t\t%03d - ", i*4); 128 ptr = (u8 *)&cmd->response[i]; 129 ptr += 3; 130 for (j = 0; j < 4; j++) 131 printf("%02X ", *ptr--); 132 printf("\n"); 133 } 134 break; 135 case MMC_RSP_R3: 136 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n", 137 cmd->response[0]); 138 break; 139 default: 140 printf("\t\tERROR MMC rsp not supported\n"); 141 break; 142 } 143 } 144 } 145 146 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd) 147 { 148 int status; 149 150 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9; 151 printf("CURR STATE:%d\n", status); 152 } 153 #endif 154 155 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) 156 const char *mmc_mode_name(enum bus_mode mode) 157 { 158 static const char *const names[] = 
{ 159 [MMC_LEGACY] = "MMC legacy", 160 [SD_LEGACY] = "SD Legacy", 161 [MMC_HS] = "MMC High Speed (26MHz)", 162 [SD_HS] = "SD High Speed (50MHz)", 163 [UHS_SDR12] = "UHS SDR12 (25MHz)", 164 [UHS_SDR25] = "UHS SDR25 (50MHz)", 165 [UHS_SDR50] = "UHS SDR50 (100MHz)", 166 [UHS_SDR104] = "UHS SDR104 (208MHz)", 167 [UHS_DDR50] = "UHS DDR50 (50MHz)", 168 [MMC_HS_52] = "MMC High Speed (52MHz)", 169 [MMC_DDR_52] = "MMC DDR52 (52MHz)", 170 [MMC_HS_200] = "HS200 (200MHz)", 171 }; 172 173 if (mode >= MMC_MODES_END) 174 return "Unknown mode"; 175 else 176 return names[mode]; 177 } 178 #endif 179 180 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode) 181 { 182 static const int freqs[] = { 183 [SD_LEGACY] = 25000000, 184 [MMC_HS] = 26000000, 185 [SD_HS] = 50000000, 186 [UHS_SDR12] = 25000000, 187 [UHS_SDR25] = 50000000, 188 [UHS_SDR50] = 100000000, 189 [UHS_SDR104] = 208000000, 190 [UHS_DDR50] = 50000000, 191 [MMC_HS_52] = 52000000, 192 [MMC_DDR_52] = 52000000, 193 [MMC_HS_200] = 200000000, 194 }; 195 196 if (mode == MMC_LEGACY) 197 return mmc->legacy_speed; 198 else if (mode >= MMC_MODES_END) 199 return 0; 200 else 201 return freqs[mode]; 202 } 203 204 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode) 205 { 206 mmc->selected_mode = mode; 207 mmc->tran_speed = mmc_mode2freq(mmc, mode); 208 mmc->ddr_mode = mmc_is_mode_ddr(mode); 209 debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode), 210 mmc->tran_speed / 1000000); 211 return 0; 212 } 213 214 #if !CONFIG_IS_ENABLED(DM_MMC) 215 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data) 216 { 217 int ret; 218 219 mmmc_trace_before_send(mmc, cmd); 220 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data); 221 mmmc_trace_after_send(mmc, cmd, ret); 222 223 return ret; 224 } 225 #endif 226 227 int mmc_send_status(struct mmc *mmc, int timeout) 228 { 229 struct mmc_cmd cmd; 230 int err, retries = 5; 231 232 cmd.cmdidx = MMC_CMD_SEND_STATUS; 233 cmd.resp_type = MMC_RSP_R1; 234 if (!mmc_host_is_spi(mmc)) 235 cmd.cmdarg = mmc->rca << 16; 236 237 while (1) { 238 err = mmc_send_cmd(mmc, &cmd, NULL); 239 if (!err) { 240 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) && 241 (cmd.response[0] & MMC_STATUS_CURR_STATE) != 242 MMC_STATE_PRG) 243 break; 244 245 if (cmd.response[0] & MMC_STATUS_MASK) { 246 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 247 printf("Status Error: 0x%08X\n", 248 cmd.response[0]); 249 #endif 250 return -ECOMM; 251 } 252 } else if (--retries < 0) 253 return err; 254 255 if (timeout-- <= 0) 256 break; 257 258 udelay(1000); 259 } 260 261 mmc_trace_state(mmc, &cmd); 262 if (timeout <= 0) { 263 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 264 printf("Timeout waiting card ready\n"); 265 #endif 266 return -ETIMEDOUT; 267 } 268 269 return 0; 270 } 271 272 int mmc_set_blocklen(struct mmc *mmc, int len) 273 { 274 struct mmc_cmd cmd; 275 276 if (mmc->ddr_mode) 277 return 0; 278 279 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN; 280 cmd.resp_type = MMC_RSP_R1; 281 cmd.cmdarg = len; 282 283 return mmc_send_cmd(mmc, &cmd, NULL); 284 } 285 286 static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start, 287 lbaint_t blkcnt) 288 { 289 struct mmc_cmd cmd; 290 struct mmc_data data; 291 292 if (blkcnt > 1) 293 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK; 294 else 295 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK; 296 297 if (mmc->high_capacity) 298 cmd.cmdarg = start; 299 else 300 cmd.cmdarg = start * mmc->read_bl_len; 301 302 cmd.resp_type = MMC_RSP_R1; 303 304 data.dest = 
dst; 305 data.blocks = blkcnt; 306 data.blocksize = mmc->read_bl_len; 307 data.flags = MMC_DATA_READ; 308 309 if (mmc_send_cmd(mmc, &cmd, &data)) 310 return 0; 311 312 if (blkcnt > 1) { 313 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION; 314 cmd.cmdarg = 0; 315 cmd.resp_type = MMC_RSP_R1b; 316 if (mmc_send_cmd(mmc, &cmd, NULL)) { 317 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 318 printf("mmc fail to send stop cmd\n"); 319 #endif 320 return 0; 321 } 322 } 323 324 return blkcnt; 325 } 326 327 #if CONFIG_IS_ENABLED(BLK) 328 ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst) 329 #else 330 ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt, 331 void *dst) 332 #endif 333 { 334 #if CONFIG_IS_ENABLED(BLK) 335 struct blk_desc *block_dev = dev_get_uclass_platdata(dev); 336 #endif 337 int dev_num = block_dev->devnum; 338 int err; 339 lbaint_t cur, blocks_todo = blkcnt; 340 341 if (blkcnt == 0) 342 return 0; 343 344 struct mmc *mmc = find_mmc_device(dev_num); 345 if (!mmc) 346 return 0; 347 348 if (CONFIG_IS_ENABLED(MMC_TINY)) 349 err = mmc_switch_part(mmc, block_dev->hwpart); 350 else 351 err = blk_dselect_hwpart(block_dev, block_dev->hwpart); 352 353 if (err < 0) 354 return 0; 355 356 if ((start + blkcnt) > block_dev->lba) { 357 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 358 printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n", 359 start + blkcnt, block_dev->lba); 360 #endif 361 return 0; 362 } 363 364 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) { 365 debug("%s: Failed to set blocklen\n", __func__); 366 return 0; 367 } 368 369 do { 370 cur = (blocks_todo > mmc->cfg->b_max) ? 371 mmc->cfg->b_max : blocks_todo; 372 if (mmc_read_blocks(mmc, dst, start, cur) != cur) { 373 debug("%s: Failed to read blocks\n", __func__); 374 return 0; 375 } 376 blocks_todo -= cur; 377 start += cur; 378 dst += cur * mmc->read_bl_len; 379 } while (blocks_todo > 0); 380 381 return blkcnt; 382 } 383 384 static int mmc_go_idle(struct mmc *mmc) 385 { 386 struct mmc_cmd cmd; 387 int err; 388 389 udelay(1000); 390 391 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE; 392 cmd.cmdarg = 0; 393 cmd.resp_type = MMC_RSP_NONE; 394 395 err = mmc_send_cmd(mmc, &cmd, NULL); 396 397 if (err) 398 return err; 399 400 udelay(2000); 401 402 return 0; 403 } 404 405 static int sd_send_op_cond(struct mmc *mmc) 406 { 407 int timeout = 1000; 408 int err; 409 struct mmc_cmd cmd; 410 411 while (1) { 412 cmd.cmdidx = MMC_CMD_APP_CMD; 413 cmd.resp_type = MMC_RSP_R1; 414 cmd.cmdarg = 0; 415 416 err = mmc_send_cmd(mmc, &cmd, NULL); 417 418 if (err) 419 return err; 420 421 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND; 422 cmd.resp_type = MMC_RSP_R3; 423 424 /* 425 * Most cards do not answer if some reserved bits 426 * in the ocr are set. However, Some controller 427 * can set bit 7 (reserved for low voltages), but 428 * how to manage low voltages SD card is not yet 429 * specified. 430 */ 431 cmd.cmdarg = mmc_host_is_spi(mmc) ? 
0 : 432 (mmc->cfg->voltages & 0xff8000); 433 434 if (mmc->version == SD_VERSION_2) 435 cmd.cmdarg |= OCR_HCS; 436 437 err = mmc_send_cmd(mmc, &cmd, NULL); 438 439 if (err) 440 return err; 441 442 if (cmd.response[0] & OCR_BUSY) 443 break; 444 445 if (timeout-- <= 0) 446 return -EOPNOTSUPP; 447 448 udelay(1000); 449 } 450 451 if (mmc->version != SD_VERSION_2) 452 mmc->version = SD_VERSION_1_0; 453 454 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 455 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 456 cmd.resp_type = MMC_RSP_R3; 457 cmd.cmdarg = 0; 458 459 err = mmc_send_cmd(mmc, &cmd, NULL); 460 461 if (err) 462 return err; 463 } 464 465 mmc->ocr = cmd.response[0]; 466 467 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 468 mmc->rca = 0; 469 470 return 0; 471 } 472 473 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg) 474 { 475 struct mmc_cmd cmd; 476 int err; 477 478 cmd.cmdidx = MMC_CMD_SEND_OP_COND; 479 cmd.resp_type = MMC_RSP_R3; 480 cmd.cmdarg = 0; 481 if (use_arg && !mmc_host_is_spi(mmc)) 482 cmd.cmdarg = OCR_HCS | 483 (mmc->cfg->voltages & 484 (mmc->ocr & OCR_VOLTAGE_MASK)) | 485 (mmc->ocr & OCR_ACCESS_MODE); 486 487 err = mmc_send_cmd(mmc, &cmd, NULL); 488 if (err) 489 return err; 490 mmc->ocr = cmd.response[0]; 491 return 0; 492 } 493 494 static int mmc_send_op_cond(struct mmc *mmc) 495 { 496 int err, i; 497 498 /* Some cards seem to need this */ 499 mmc_go_idle(mmc); 500 501 /* Asking to the card its capabilities */ 502 for (i = 0; i < 2; i++) { 503 err = mmc_send_op_cond_iter(mmc, i != 0); 504 if (err) 505 return err; 506 507 /* exit if not busy (flag seems to be inverted) */ 508 if (mmc->ocr & OCR_BUSY) 509 break; 510 } 511 mmc->op_cond_pending = 1; 512 return 0; 513 } 514 515 static int mmc_complete_op_cond(struct mmc *mmc) 516 { 517 struct mmc_cmd cmd; 518 int timeout = 1000; 519 uint start; 520 int err; 521 522 mmc->op_cond_pending = 0; 523 if (!(mmc->ocr & OCR_BUSY)) { 524 /* Some cards seem to need this */ 525 mmc_go_idle(mmc); 526 527 start = get_timer(0); 528 while (1) { 529 err = mmc_send_op_cond_iter(mmc, 1); 530 if (err) 531 return err; 532 if (mmc->ocr & OCR_BUSY) 533 break; 534 if (get_timer(start) > timeout) 535 return -EOPNOTSUPP; 536 udelay(100); 537 } 538 } 539 540 if (mmc_host_is_spi(mmc)) { /* read OCR for spi */ 541 cmd.cmdidx = MMC_CMD_SPI_READ_OCR; 542 cmd.resp_type = MMC_RSP_R3; 543 cmd.cmdarg = 0; 544 545 err = mmc_send_cmd(mmc, &cmd, NULL); 546 547 if (err) 548 return err; 549 550 mmc->ocr = cmd.response[0]; 551 } 552 553 mmc->version = MMC_VERSION_UNKNOWN; 554 555 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS); 556 mmc->rca = 1; 557 558 return 0; 559 } 560 561 562 static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd) 563 { 564 struct mmc_cmd cmd; 565 struct mmc_data data; 566 int err; 567 568 /* Get the Card Status Register */ 569 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD; 570 cmd.resp_type = MMC_RSP_R1; 571 cmd.cmdarg = 0; 572 573 data.dest = (char *)ext_csd; 574 data.blocks = 1; 575 data.blocksize = MMC_MAX_BLOCK_LEN; 576 data.flags = MMC_DATA_READ; 577 578 err = mmc_send_cmd(mmc, &cmd, &data); 579 580 return err; 581 } 582 583 int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value) 584 { 585 struct mmc_cmd cmd; 586 int timeout = 1000; 587 int retries = 3; 588 int ret; 589 590 cmd.cmdidx = MMC_CMD_SWITCH; 591 cmd.resp_type = MMC_RSP_R1b; 592 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 593 (index << 16) | 594 (value << 8); 595 596 while (retries > 0) { 597 ret = mmc_send_cmd(mmc, &cmd, NULL); 598 599 /* Waiting for the ready 
status */
		if (!ret) {
			ret = mmc_send_status(mmc, timeout);
			return ret;
		}

		retries--;
	}

	return ret;
}

static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			 speed_bits);
	if (err)
		return err;

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}

static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		printf("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0xf;

	/* High Speed is set, there are two types: 52MHz and 26MHz */
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}

static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
723 */ 724 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) { 725 ret = mmc_set_capacity(mmc, part_num); 726 mmc_get_blk_desc(mmc)->hwpart = part_num; 727 } 728 729 return ret; 730 } 731 732 int mmc_hwpart_config(struct mmc *mmc, 733 const struct mmc_hwpart_conf *conf, 734 enum mmc_hwpart_conf_mode mode) 735 { 736 u8 part_attrs = 0; 737 u32 enh_size_mult; 738 u32 enh_start_addr; 739 u32 gp_size_mult[4]; 740 u32 max_enh_size_mult; 741 u32 tot_enh_size_mult = 0; 742 u8 wr_rel_set; 743 int i, pidx, err; 744 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 745 746 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE) 747 return -EINVAL; 748 749 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) { 750 printf("eMMC >= 4.4 required for enhanced user data area\n"); 751 return -EMEDIUMTYPE; 752 } 753 754 if (!(mmc->part_support & PART_SUPPORT)) { 755 printf("Card does not support partitioning\n"); 756 return -EMEDIUMTYPE; 757 } 758 759 if (!mmc->hc_wp_grp_size) { 760 printf("Card does not define HC WP group size\n"); 761 return -EMEDIUMTYPE; 762 } 763 764 /* check partition alignment and total enhanced size */ 765 if (conf->user.enh_size) { 766 if (conf->user.enh_size % mmc->hc_wp_grp_size || 767 conf->user.enh_start % mmc->hc_wp_grp_size) { 768 printf("User data enhanced area not HC WP group " 769 "size aligned\n"); 770 return -EINVAL; 771 } 772 part_attrs |= EXT_CSD_ENH_USR; 773 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size; 774 if (mmc->high_capacity) { 775 enh_start_addr = conf->user.enh_start; 776 } else { 777 enh_start_addr = (conf->user.enh_start << 9); 778 } 779 } else { 780 enh_size_mult = 0; 781 enh_start_addr = 0; 782 } 783 tot_enh_size_mult += enh_size_mult; 784 785 for (pidx = 0; pidx < 4; pidx++) { 786 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) { 787 printf("GP%i partition not HC WP group size " 788 "aligned\n", pidx+1); 789 return -EINVAL; 790 } 791 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size; 792 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) { 793 part_attrs |= EXT_CSD_ENH_GP(pidx); 794 tot_enh_size_mult += gp_size_mult[pidx]; 795 } 796 } 797 798 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) { 799 printf("Card does not support enhanced attribute\n"); 800 return -EMEDIUMTYPE; 801 } 802 803 err = mmc_send_ext_csd(mmc, ext_csd); 804 if (err) 805 return err; 806 807 max_enh_size_mult = 808 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) + 809 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) + 810 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT]; 811 if (tot_enh_size_mult > max_enh_size_mult) { 812 printf("Total enhanced size exceeds maximum (%u > %u)\n", 813 tot_enh_size_mult, max_enh_size_mult); 814 return -EMEDIUMTYPE; 815 } 816 817 /* The default value of EXT_CSD_WR_REL_SET is device 818 * dependent, the values can only be changed if the 819 * EXT_CSD_HS_CTRL_REL bit is set. The values can be 820 * changed only once and before partitioning is completed. 
*/ 821 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 822 if (conf->user.wr_rel_change) { 823 if (conf->user.wr_rel_set) 824 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR; 825 else 826 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR; 827 } 828 for (pidx = 0; pidx < 4; pidx++) { 829 if (conf->gp_part[pidx].wr_rel_change) { 830 if (conf->gp_part[pidx].wr_rel_set) 831 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx); 832 else 833 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx); 834 } 835 } 836 837 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] && 838 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) { 839 puts("Card does not support host controlled partition write " 840 "reliability settings\n"); 841 return -EMEDIUMTYPE; 842 } 843 844 if (ext_csd[EXT_CSD_PARTITION_SETTING] & 845 EXT_CSD_PARTITION_SETTING_COMPLETED) { 846 printf("Card already partitioned\n"); 847 return -EPERM; 848 } 849 850 if (mode == MMC_HWPART_CONF_CHECK) 851 return 0; 852 853 /* Partitioning requires high-capacity size definitions */ 854 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) { 855 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 856 EXT_CSD_ERASE_GROUP_DEF, 1); 857 858 if (err) 859 return err; 860 861 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 862 863 /* update erase group size to be high-capacity */ 864 mmc->erase_grp_size = 865 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 866 867 } 868 869 /* all OK, write the configuration */ 870 for (i = 0; i < 4; i++) { 871 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 872 EXT_CSD_ENH_START_ADDR+i, 873 (enh_start_addr >> (i*8)) & 0xFF); 874 if (err) 875 return err; 876 } 877 for (i = 0; i < 3; i++) { 878 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 879 EXT_CSD_ENH_SIZE_MULT+i, 880 (enh_size_mult >> (i*8)) & 0xFF); 881 if (err) 882 return err; 883 } 884 for (pidx = 0; pidx < 4; pidx++) { 885 for (i = 0; i < 3; i++) { 886 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 887 EXT_CSD_GP_SIZE_MULT+pidx*3+i, 888 (gp_size_mult[pidx] >> (i*8)) & 0xFF); 889 if (err) 890 return err; 891 } 892 } 893 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 894 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs); 895 if (err) 896 return err; 897 898 if (mode == MMC_HWPART_CONF_SET) 899 return 0; 900 901 /* The WR_REL_SET is a write-once register but shall be 902 * written before setting PART_SETTING_COMPLETED. As it is 903 * write-once we can only write it when completing the 904 * partitioning. */ 905 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) { 906 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 907 EXT_CSD_WR_REL_SET, wr_rel_set); 908 if (err) 909 return err; 910 } 911 912 /* Setting PART_SETTING_COMPLETED confirms the partition 913 * configuration but it only becomes effective after power 914 * cycle, so we do not adjust the partition related settings 915 * in the mmc struct. 
*/ 916 917 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 918 EXT_CSD_PARTITION_SETTING, 919 EXT_CSD_PARTITION_SETTING_COMPLETED); 920 if (err) 921 return err; 922 923 return 0; 924 } 925 926 #if !CONFIG_IS_ENABLED(DM_MMC) 927 int mmc_getcd(struct mmc *mmc) 928 { 929 int cd; 930 931 cd = board_mmc_getcd(mmc); 932 933 if (cd < 0) { 934 if (mmc->cfg->ops->getcd) 935 cd = mmc->cfg->ops->getcd(mmc); 936 else 937 cd = 1; 938 } 939 940 return cd; 941 } 942 #endif 943 944 static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp) 945 { 946 struct mmc_cmd cmd; 947 struct mmc_data data; 948 949 /* Switch the frequency */ 950 cmd.cmdidx = SD_CMD_SWITCH_FUNC; 951 cmd.resp_type = MMC_RSP_R1; 952 cmd.cmdarg = (mode << 31) | 0xffffff; 953 cmd.cmdarg &= ~(0xf << (group * 4)); 954 cmd.cmdarg |= value << (group * 4); 955 956 data.dest = (char *)resp; 957 data.blocksize = 64; 958 data.blocks = 1; 959 data.flags = MMC_DATA_READ; 960 961 return mmc_send_cmd(mmc, &cmd, &data); 962 } 963 964 965 static int sd_get_capabilities(struct mmc *mmc) 966 { 967 int err; 968 struct mmc_cmd cmd; 969 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2); 970 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16); 971 struct mmc_data data; 972 int timeout; 973 974 mmc->card_caps = MMC_MODE_1BIT; 975 976 if (mmc_host_is_spi(mmc)) 977 return 0; 978 979 /* Read the SCR to find out if this card supports higher speeds */ 980 cmd.cmdidx = MMC_CMD_APP_CMD; 981 cmd.resp_type = MMC_RSP_R1; 982 cmd.cmdarg = mmc->rca << 16; 983 984 err = mmc_send_cmd(mmc, &cmd, NULL); 985 986 if (err) 987 return err; 988 989 cmd.cmdidx = SD_CMD_APP_SEND_SCR; 990 cmd.resp_type = MMC_RSP_R1; 991 cmd.cmdarg = 0; 992 993 timeout = 3; 994 995 retry_scr: 996 data.dest = (char *)scr; 997 data.blocksize = 8; 998 data.blocks = 1; 999 data.flags = MMC_DATA_READ; 1000 1001 err = mmc_send_cmd(mmc, &cmd, &data); 1002 1003 if (err) { 1004 if (timeout--) 1005 goto retry_scr; 1006 1007 return err; 1008 } 1009 1010 mmc->scr[0] = __be32_to_cpu(scr[0]); 1011 mmc->scr[1] = __be32_to_cpu(scr[1]); 1012 1013 switch ((mmc->scr[0] >> 24) & 0xf) { 1014 case 0: 1015 mmc->version = SD_VERSION_1_0; 1016 break; 1017 case 1: 1018 mmc->version = SD_VERSION_1_10; 1019 break; 1020 case 2: 1021 mmc->version = SD_VERSION_2; 1022 if ((mmc->scr[0] >> 15) & 0x1) 1023 mmc->version = SD_VERSION_3; 1024 break; 1025 default: 1026 mmc->version = SD_VERSION_1_0; 1027 break; 1028 } 1029 1030 if (mmc->scr[0] & SD_DATA_4BIT) 1031 mmc->card_caps |= MMC_MODE_4BIT; 1032 1033 /* Version 1.0 doesn't support switching */ 1034 if (mmc->version == SD_VERSION_1_0) 1035 return 0; 1036 1037 timeout = 4; 1038 while (timeout--) { 1039 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1, 1040 (u8 *)switch_status); 1041 1042 if (err) 1043 return err; 1044 1045 /* The high-speed function is busy. 
Try again */ 1046 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY)) 1047 break; 1048 } 1049 1050 /* If high-speed isn't supported, we return */ 1051 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED) 1052 mmc->card_caps |= MMC_CAP(SD_HS); 1053 1054 return 0; 1055 } 1056 1057 static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode) 1058 { 1059 int err; 1060 1061 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16); 1062 1063 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, 1, (u8 *)switch_status); 1064 if (err) 1065 return err; 1066 1067 if ((__be32_to_cpu(switch_status[4]) & 0x0f000000) != 0x01000000) 1068 return -ENOTSUPP; 1069 1070 return 0; 1071 } 1072 1073 int sd_select_bus_width(struct mmc *mmc, int w) 1074 { 1075 int err; 1076 struct mmc_cmd cmd; 1077 1078 if ((w != 4) && (w != 1)) 1079 return -EINVAL; 1080 1081 cmd.cmdidx = MMC_CMD_APP_CMD; 1082 cmd.resp_type = MMC_RSP_R1; 1083 cmd.cmdarg = mmc->rca << 16; 1084 1085 err = mmc_send_cmd(mmc, &cmd, NULL); 1086 if (err) 1087 return err; 1088 1089 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH; 1090 cmd.resp_type = MMC_RSP_R1; 1091 if (w == 4) 1092 cmd.cmdarg = 2; 1093 else if (w == 1) 1094 cmd.cmdarg = 0; 1095 err = mmc_send_cmd(mmc, &cmd, NULL); 1096 if (err) 1097 return err; 1098 1099 return 0; 1100 } 1101 1102 static int sd_read_ssr(struct mmc *mmc) 1103 { 1104 int err, i; 1105 struct mmc_cmd cmd; 1106 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16); 1107 struct mmc_data data; 1108 int timeout = 3; 1109 unsigned int au, eo, et, es; 1110 1111 cmd.cmdidx = MMC_CMD_APP_CMD; 1112 cmd.resp_type = MMC_RSP_R1; 1113 cmd.cmdarg = mmc->rca << 16; 1114 1115 err = mmc_send_cmd(mmc, &cmd, NULL); 1116 if (err) 1117 return err; 1118 1119 cmd.cmdidx = SD_CMD_APP_SD_STATUS; 1120 cmd.resp_type = MMC_RSP_R1; 1121 cmd.cmdarg = 0; 1122 1123 retry_ssr: 1124 data.dest = (char *)ssr; 1125 data.blocksize = 64; 1126 data.blocks = 1; 1127 data.flags = MMC_DATA_READ; 1128 1129 err = mmc_send_cmd(mmc, &cmd, &data); 1130 if (err) { 1131 if (timeout--) 1132 goto retry_ssr; 1133 1134 return err; 1135 } 1136 1137 for (i = 0; i < 16; i++) 1138 ssr[i] = be32_to_cpu(ssr[i]); 1139 1140 au = (ssr[2] >> 12) & 0xF; 1141 if ((au <= 9) || (mmc->version == SD_VERSION_3)) { 1142 mmc->ssr.au = sd_au_size[au]; 1143 es = (ssr[3] >> 24) & 0xFF; 1144 es |= (ssr[2] & 0xFF) << 8; 1145 et = (ssr[3] >> 18) & 0x3F; 1146 if (es && et) { 1147 eo = (ssr[3] >> 16) & 0x3; 1148 mmc->ssr.erase_timeout = (et * 1000) / es; 1149 mmc->ssr.erase_offset = eo * 1000; 1150 } 1151 } else { 1152 debug("Invalid Allocation Unit Size.\n"); 1153 } 1154 1155 return 0; 1156 } 1157 1158 /* frequency bases */ 1159 /* divided by 10 to be nice to platforms without floating point */ 1160 static const int fbase[] = { 1161 10000, 1162 100000, 1163 1000000, 1164 10000000, 1165 }; 1166 1167 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1168 * to platforms without floating point. 
1169 */ 1170 static const u8 multipliers[] = { 1171 0, /* reserved */ 1172 10, 1173 12, 1174 13, 1175 15, 1176 20, 1177 25, 1178 30, 1179 35, 1180 40, 1181 45, 1182 50, 1183 55, 1184 60, 1185 70, 1186 80, 1187 }; 1188 1189 static inline int bus_width(uint cap) 1190 { 1191 if (cap == MMC_MODE_8BIT) 1192 return 8; 1193 if (cap == MMC_MODE_4BIT) 1194 return 4; 1195 if (cap == MMC_MODE_1BIT) 1196 return 1; 1197 printf("invalid bus witdh capability 0x%x\n", cap); 1198 return 0; 1199 } 1200 1201 #if !CONFIG_IS_ENABLED(DM_MMC) 1202 static int mmc_execute_tuning(struct mmc *mmc, uint opcode) 1203 { 1204 return -ENOTSUPP; 1205 } 1206 1207 static void mmc_send_init_stream(struct mmc *mmc) 1208 { 1209 } 1210 1211 static int mmc_set_ios(struct mmc *mmc) 1212 { 1213 int ret = 0; 1214 1215 if (mmc->cfg->ops->set_ios) 1216 ret = mmc->cfg->ops->set_ios(mmc); 1217 1218 return ret; 1219 } 1220 #endif 1221 1222 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable) 1223 { 1224 if (clock > mmc->cfg->f_max) 1225 clock = mmc->cfg->f_max; 1226 1227 if (clock < mmc->cfg->f_min) 1228 clock = mmc->cfg->f_min; 1229 1230 mmc->clock = clock; 1231 mmc->clk_disable = disable; 1232 1233 return mmc_set_ios(mmc); 1234 } 1235 1236 static int mmc_set_bus_width(struct mmc *mmc, uint width) 1237 { 1238 mmc->bus_width = width; 1239 1240 return mmc_set_ios(mmc); 1241 } 1242 1243 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) 1244 /* 1245 * helper function to display the capabilities in a human 1246 * friendly manner. The capabilities include bus width and 1247 * supported modes. 1248 */ 1249 void mmc_dump_capabilities(const char *text, uint caps) 1250 { 1251 enum bus_mode mode; 1252 1253 printf("%s: widths [", text); 1254 if (caps & MMC_MODE_8BIT) 1255 printf("8, "); 1256 if (caps & MMC_MODE_4BIT) 1257 printf("4, "); 1258 if (caps & MMC_MODE_1BIT) 1259 printf("1, "); 1260 printf("\b\b] modes ["); 1261 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++) 1262 if (MMC_CAP(mode) & caps) 1263 printf("%s, ", mmc_mode_name(mode)); 1264 printf("\b\b]\n"); 1265 } 1266 #endif 1267 1268 struct mode_width_tuning { 1269 enum bus_mode mode; 1270 uint widths; 1271 }; 1272 1273 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage) 1274 { 1275 mmc->signal_voltage = signal_voltage; 1276 return mmc_set_ios(mmc); 1277 } 1278 1279 static const struct mode_width_tuning sd_modes_by_pref[] = { 1280 { 1281 .mode = SD_HS, 1282 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1283 }, 1284 { 1285 .mode = SD_LEGACY, 1286 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1287 } 1288 }; 1289 1290 #define for_each_sd_mode_by_pref(caps, mwt) \ 1291 for (mwt = sd_modes_by_pref;\ 1292 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\ 1293 mwt++) \ 1294 if (caps & MMC_CAP(mwt->mode)) 1295 1296 static int sd_select_mode_and_width(struct mmc *mmc) 1297 { 1298 int err; 1299 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT}; 1300 const struct mode_width_tuning *mwt; 1301 1302 err = sd_get_capabilities(mmc); 1303 if (err) 1304 return err; 1305 /* Restrict card's capabilities by what the host can do */ 1306 mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT); 1307 1308 for_each_sd_mode_by_pref(mmc->card_caps, mwt) { 1309 uint *w; 1310 1311 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) { 1312 if (*w & mmc->card_caps & mwt->widths) { 1313 debug("trying mode %s width %d (at %d MHz)\n", 1314 mmc_mode_name(mwt->mode), 1315 bus_width(*w), 1316 mmc_mode2freq(mmc, mwt->mode) / 1000000); 1317 1318 /* configure the bus width (card + host) */ 1319 
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed, false);

				err = sd_read_ssr(mmc);
				if (!err)
					return 0;

				printf("bad ssr\n");

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, SD_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed, false);
			}
		}
	}

	printf("unable to select a mode\n");
	return -ENOTSUPP;
}

/*
 * Read and compare the part of the ext_csd that is constant.
 * This can be used to check that the transfer is working
 * as expected.
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
{
	int err;
	const u8 *ext_csd = mmc->ext_csd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, test_csd);
	if (err)
		return err;

	/* Only compare read-only fields */
	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
	    ext_csd[EXT_CSD_REV]
		== test_csd[EXT_CSD_REV] &&
	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
		return 0;

	return -EBADMSG;
}

/* modes are listed here from the most to the least preferred */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
		if (caps & MMC_CAP(mwt->mode))

static const struct ext_csd_bus_width {
	uint cap;
	bool is_ddr;
	uint ext_csd_bits;
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};

#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))

static int mmc_select_mode_and_width(struct mmc *mmc)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

	err = mmc_get_capabilities(mmc);
	if (err)
		return err;

	/* Restrict card's capabilities by what the host can do */
	mmc->card_caps &= (mmc->cfg->host_caps | MMC_MODE_1BIT);

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	for_each_mmc_mode_by_pref(mmc->card_caps, mwt) {
		for_each_supported_width(mmc->card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			debug("trying mode %s width %d (at %d MHz)\n",
			      mmc_mode_name(mwt->mode),
			      bus_width(ecbw->cap),
			      mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card) */
			err = mmc_set_card_speed(mmc, mwt->mode);
			if (err)
				goto error;

			/*
			 * configure the bus width AND the ddr mode (card).
			 * The host side will be taken care of in the next step
			 */
			if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
				err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ecbw->ext_csd_bits);
				if (err)
					goto error;
			}

			/* configure the bus mode (host) */
			mmc_select_mode(mmc, mwt->mode);
			mmc_set_clock(mmc, mmc->tran_speed, false);

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	printf("unable to select a mode\n");

	return -ENOTSUPP;
}

static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	u8 *ext_csd;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	ext_csd = malloc_cache_aligned(MMC_MAX_BLOCK_LEN);
	if (!ext_csd)
		return -ENOMEM;

	mmc->ext_csd = ext_csd;

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;
	if (ext_csd[EXT_CSD_REV] >= 2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
			| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
			| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
			| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	switch (ext_csd[EXT_CSD_REV]) {
	case 1:
		mmc->version = MMC_VERSION_4_1;
		break;
	case 2:
		mmc->version = MMC_VERSION_4_2;
		break;
	case 3:
		mmc->version = MMC_VERSION_4_3;
		break;
	case 5:
		mmc->version = MMC_VERSION_4_41;
		break;
	case 6:
		mmc->version = MMC_VERSION_4_5;
		break;
	case 7:
		mmc->version = MMC_VERSION_5_0;
		break;
	case 8:
		mmc->version = MMC_VERSION_5_1;
		break;
	}

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
1570 */ 1571 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 1572 EXT_CSD_PARTITION_SETTING_COMPLETED); 1573 1574 /* store the partition info of emmc */ 1575 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 1576 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 1577 ext_csd[EXT_CSD_BOOT_MULT]) 1578 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 1579 if (part_completed && 1580 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 1581 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 1582 1583 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 1584 1585 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 1586 1587 for (i = 0; i < 4; i++) { 1588 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 1589 uint mult = (ext_csd[idx + 2] << 16) + 1590 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 1591 if (mult) 1592 has_parts = true; 1593 if (!part_completed) 1594 continue; 1595 mmc->capacity_gp[i] = mult; 1596 mmc->capacity_gp[i] *= 1597 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1598 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1599 mmc->capacity_gp[i] <<= 19; 1600 } 1601 1602 if (part_completed) { 1603 mmc->enh_user_size = 1604 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) + 1605 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) + 1606 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 1607 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 1608 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 1609 mmc->enh_user_size <<= 19; 1610 mmc->enh_user_start = 1611 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) + 1612 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) + 1613 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) + 1614 ext_csd[EXT_CSD_ENH_START_ADDR]; 1615 if (mmc->high_capacity) 1616 mmc->enh_user_start <<= 9; 1617 } 1618 1619 /* 1620 * Host needs to enable ERASE_GRP_DEF bit if device is 1621 * partitioned. This bit will be lost every time after a reset 1622 * or power off. This will affect erase size. 1623 */ 1624 if (part_completed) 1625 has_parts = true; 1626 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 1627 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 1628 has_parts = true; 1629 if (has_parts) { 1630 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1631 EXT_CSD_ERASE_GROUP_DEF, 1); 1632 1633 if (err) 1634 return err; 1635 1636 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 1637 } 1638 1639 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 1640 /* Read out group size from ext_csd */ 1641 mmc->erase_grp_size = 1642 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 1643 /* 1644 * if high capacity and partition setting completed 1645 * SEC_COUNT is valid even if it is smaller than 2 GiB 1646 * JEDEC Standard JESD84-B45, 6.2.4 1647 */ 1648 if (mmc->high_capacity && part_completed) { 1649 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 1650 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 1651 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 1652 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 1653 capacity *= MMC_MAX_BLOCK_LEN; 1654 mmc->capacity_user = capacity; 1655 } 1656 } else { 1657 /* Calculate the group size from the csd value. 
		 */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}

	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
}

static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize;
	struct mmc_cmd cmd;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);

	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
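	/*
	 * The boot, RPMB and general-purpose partition capacities below are
	 * cleared here; for eMMC (version 4 and later) they are filled in
	 * later from EXT_CSD by mmc_startup_v4(), which may also refine
	 * capacity_user from SEC_COUNT.
	 */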
mmc->capacity_boot = 0; 1795 mmc->capacity_rpmb = 0; 1796 for (i = 0; i < 4; i++) 1797 mmc->capacity_gp[i] = 0; 1798 1799 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 1800 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1801 1802 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 1803 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1804 1805 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 1806 cmd.cmdidx = MMC_CMD_SET_DSR; 1807 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 1808 cmd.resp_type = MMC_RSP_NONE; 1809 if (mmc_send_cmd(mmc, &cmd, NULL)) 1810 printf("MMC: SET_DSR failed\n"); 1811 } 1812 1813 /* Select the card, and put it into Transfer Mode */ 1814 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 1815 cmd.cmdidx = MMC_CMD_SELECT_CARD; 1816 cmd.resp_type = MMC_RSP_R1; 1817 cmd.cmdarg = mmc->rca << 16; 1818 err = mmc_send_cmd(mmc, &cmd, NULL); 1819 1820 if (err) 1821 return err; 1822 } 1823 1824 /* 1825 * For SD, its erase group is always one sector 1826 */ 1827 mmc->erase_grp_size = 1; 1828 mmc->part_config = MMCPART_NOAVAILABLE; 1829 1830 err = mmc_startup_v4(mmc); 1831 if (err) 1832 return err; 1833 1834 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart); 1835 if (err) 1836 return err; 1837 1838 if (IS_SD(mmc)) 1839 err = sd_select_mode_and_width(mmc); 1840 else 1841 err = mmc_select_mode_and_width(mmc); 1842 1843 if (err) 1844 return err; 1845 1846 1847 /* Fix the block length for DDR mode */ 1848 if (mmc->ddr_mode) { 1849 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 1850 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 1851 } 1852 1853 /* fill in device description */ 1854 bdesc = mmc_get_blk_desc(mmc); 1855 bdesc->lun = 0; 1856 bdesc->hwpart = 0; 1857 bdesc->type = 0; 1858 bdesc->blksz = mmc->read_bl_len; 1859 bdesc->log2blksz = LOG2(bdesc->blksz); 1860 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len); 1861 #if !defined(CONFIG_SPL_BUILD) || \ 1862 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \ 1863 !defined(CONFIG_USE_TINY_PRINTF)) 1864 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x", 1865 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), 1866 (mmc->cid[3] >> 16) & 0xffff); 1867 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, 1868 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, 1869 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, 1870 (mmc->cid[2] >> 24) & 0xff); 1871 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, 1872 (mmc->cid[2] >> 16) & 0xf); 1873 #else 1874 bdesc->vendor[0] = 0; 1875 bdesc->product[0] = 0; 1876 bdesc->revision[0] = 0; 1877 #endif 1878 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT) 1879 part_init(bdesc); 1880 #endif 1881 1882 return 0; 1883 } 1884 1885 static int mmc_send_if_cond(struct mmc *mmc) 1886 { 1887 struct mmc_cmd cmd; 1888 int err; 1889 1890 cmd.cmdidx = SD_CMD_SEND_IF_COND; 1891 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */ 1892 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa; 1893 cmd.resp_type = MMC_RSP_R7; 1894 1895 err = mmc_send_cmd(mmc, &cmd, NULL); 1896 1897 if (err) 1898 return err; 1899 1900 if ((cmd.response[0] & 0xff) != 0xaa) 1901 return -EOPNOTSUPP; 1902 else 1903 mmc->version = SD_VERSION_2; 1904 1905 return 0; 1906 } 1907 1908 #if !CONFIG_IS_ENABLED(DM_MMC) 1909 /* board-specific MMC power initializations. 
*/ 1910 __weak void board_mmc_power_init(void) 1911 { 1912 } 1913 #endif 1914 1915 static int mmc_power_init(struct mmc *mmc) 1916 { 1917 #if CONFIG_IS_ENABLED(DM_MMC) 1918 #if CONFIG_IS_ENABLED(DM_REGULATOR) 1919 int ret; 1920 1921 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply", 1922 &mmc->vmmc_supply); 1923 if (ret) 1924 debug("%s: No vmmc supply\n", mmc->dev->name); 1925 1926 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply", 1927 &mmc->vqmmc_supply); 1928 if (ret) 1929 debug("%s: No vqmmc supply\n", mmc->dev->name); 1930 #endif 1931 #else /* !CONFIG_DM_MMC */ 1932 /* 1933 * Driver model should use a regulator, as above, rather than calling 1934 * out to board code. 1935 */ 1936 board_mmc_power_init(); 1937 #endif 1938 return 0; 1939 } 1940 1941 /* 1942 * put the host in the initial state: 1943 * - turn on Vdd (card power supply) 1944 * - configure the bus width and clock to minimal values 1945 */ 1946 static void mmc_set_initial_state(struct mmc *mmc) 1947 { 1948 int err; 1949 1950 /* First try to set 3.3V. If it fails set to 1.8V */ 1951 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330); 1952 if (err != 0) 1953 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180); 1954 if (err != 0) 1955 printf("mmc: failed to set signal voltage\n"); 1956 1957 mmc_select_mode(mmc, MMC_LEGACY); 1958 mmc_set_bus_width(mmc, 1); 1959 mmc_set_clock(mmc, 0, false); 1960 } 1961 1962 static int mmc_power_on(struct mmc *mmc) 1963 { 1964 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR) 1965 if (mmc->vmmc_supply) { 1966 int ret = regulator_set_enable(mmc->vmmc_supply, true); 1967 1968 if (ret) { 1969 puts("Error enabling VMMC supply\n"); 1970 return ret; 1971 } 1972 } 1973 #endif 1974 return 0; 1975 } 1976 1977 static int mmc_power_off(struct mmc *mmc) 1978 { 1979 mmc_set_clock(mmc, 1, true); 1980 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR) 1981 if (mmc->vmmc_supply) { 1982 int ret = regulator_set_enable(mmc->vmmc_supply, false); 1983 1984 if (ret) { 1985 puts("Error disabling VMMC supply\n"); 1986 return ret; 1987 } 1988 } 1989 #endif 1990 return 0; 1991 } 1992 1993 static int mmc_power_cycle(struct mmc *mmc) 1994 { 1995 int ret; 1996 1997 ret = mmc_power_off(mmc); 1998 if (ret) 1999 return ret; 2000 /* 2001 * SD spec recommends at least 1ms of delay. Let's wait for 2ms 2002 * to be on the safer side. 
2003 */ 2004 udelay(2000); 2005 return mmc_power_on(mmc); 2006 } 2007 2008 int mmc_start_init(struct mmc *mmc) 2009 { 2010 bool no_card; 2011 int err; 2012 2013 /* we pretend there's no card when init is NULL */ 2014 no_card = mmc_getcd(mmc) == 0; 2015 #if !CONFIG_IS_ENABLED(DM_MMC) 2016 no_card = no_card || (mmc->cfg->ops->init == NULL); 2017 #endif 2018 if (no_card) { 2019 mmc->has_init = 0; 2020 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2021 printf("MMC: no card present\n"); 2022 #endif 2023 return -ENOMEDIUM; 2024 } 2025 2026 if (mmc->has_init) 2027 return 0; 2028 2029 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 2030 mmc_adapter_card_type_ident(); 2031 #endif 2032 err = mmc_power_init(mmc); 2033 if (err) 2034 return err; 2035 2036 err = mmc_power_on(mmc); 2037 if (err) 2038 return err; 2039 2040 #if CONFIG_IS_ENABLED(DM_MMC) 2041 /* The device has already been probed ready for use */ 2042 #else 2043 /* made sure it's not NULL earlier */ 2044 err = mmc->cfg->ops->init(mmc); 2045 if (err) 2046 return err; 2047 #endif 2048 mmc->ddr_mode = 0; 2049 2050 mmc_set_initial_state(mmc); 2051 mmc_send_init_stream(mmc); 2052 2053 /* Reset the Card */ 2054 err = mmc_go_idle(mmc); 2055 2056 if (err) 2057 return err; 2058 2059 /* The internal partition reset to user partition(0) at every CMD0*/ 2060 mmc_get_blk_desc(mmc)->hwpart = 0; 2061 2062 /* Test for SD version 2 */ 2063 err = mmc_send_if_cond(mmc); 2064 2065 /* Now try to get the SD card's operating condition */ 2066 err = sd_send_op_cond(mmc); 2067 2068 /* If the command timed out, we check for an MMC card */ 2069 if (err == -ETIMEDOUT) { 2070 err = mmc_send_op_cond(mmc); 2071 2072 if (err) { 2073 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2074 printf("Card did not respond to voltage select!\n"); 2075 #endif 2076 return -EOPNOTSUPP; 2077 } 2078 } 2079 2080 if (!err) 2081 mmc->init_in_progress = 1; 2082 2083 return err; 2084 } 2085 2086 static int mmc_complete_init(struct mmc *mmc) 2087 { 2088 int err = 0; 2089 2090 mmc->init_in_progress = 0; 2091 if (mmc->op_cond_pending) 2092 err = mmc_complete_op_cond(mmc); 2093 2094 if (!err) 2095 err = mmc_startup(mmc); 2096 if (err) 2097 mmc->has_init = 0; 2098 else 2099 mmc->has_init = 1; 2100 return err; 2101 } 2102 2103 int mmc_init(struct mmc *mmc) 2104 { 2105 int err = 0; 2106 __maybe_unused unsigned start; 2107 #if CONFIG_IS_ENABLED(DM_MMC) 2108 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev); 2109 2110 upriv->mmc = mmc; 2111 #endif 2112 if (mmc->has_init) 2113 return 0; 2114 2115 start = get_timer(0); 2116 2117 if (!mmc->init_in_progress) 2118 err = mmc_start_init(mmc); 2119 2120 if (!err) 2121 err = mmc_complete_init(mmc); 2122 if (err) 2123 printf("%s: %d, time %lu\n", __func__, err, get_timer(start)); 2124 2125 return err; 2126 } 2127 2128 int mmc_set_dsr(struct mmc *mmc, u16 val) 2129 { 2130 mmc->dsr = val; 2131 return 0; 2132 } 2133 2134 /* CPU-specific MMC initializations */ 2135 __weak int cpu_mmc_init(bd_t *bis) 2136 { 2137 return -1; 2138 } 2139 2140 /* board-specific MMC initializations. 
*/ 2141 __weak int board_mmc_init(bd_t *bis) 2142 { 2143 return -1; 2144 } 2145 2146 void mmc_set_preinit(struct mmc *mmc, int preinit) 2147 { 2148 mmc->preinit = preinit; 2149 } 2150 2151 #if CONFIG_IS_ENABLED(DM_MMC) && defined(CONFIG_SPL_BUILD) 2152 static int mmc_probe(bd_t *bis) 2153 { 2154 return 0; 2155 } 2156 #elif CONFIG_IS_ENABLED(DM_MMC) 2157 static int mmc_probe(bd_t *bis) 2158 { 2159 int ret, i; 2160 struct uclass *uc; 2161 struct udevice *dev; 2162 2163 ret = uclass_get(UCLASS_MMC, &uc); 2164 if (ret) 2165 return ret; 2166 2167 /* 2168 * Try to add them in sequence order. Really with driver model we 2169 * should allow holes, but the current MMC list does not allow that. 2170 * So if we request 0, 1, 3 we will get 0, 1, 2. 2171 */ 2172 for (i = 0; ; i++) { 2173 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev); 2174 if (ret == -ENODEV) 2175 break; 2176 } 2177 uclass_foreach_dev(dev, uc) { 2178 ret = device_probe(dev); 2179 if (ret) 2180 printf("%s - probe failed: %d\n", dev->name, ret); 2181 } 2182 2183 return 0; 2184 } 2185 #else 2186 static int mmc_probe(bd_t *bis) 2187 { 2188 if (board_mmc_init(bis) < 0) 2189 cpu_mmc_init(bis); 2190 2191 return 0; 2192 } 2193 #endif 2194 2195 int mmc_initialize(bd_t *bis) 2196 { 2197 static int initialized = 0; 2198 int ret; 2199 if (initialized) /* Avoid initializing mmc multiple times */ 2200 return 0; 2201 initialized = 1; 2202 2203 #if !CONFIG_IS_ENABLED(BLK) 2204 #if !CONFIG_IS_ENABLED(MMC_TINY) 2205 mmc_list_init(); 2206 #endif 2207 #endif 2208 ret = mmc_probe(bis); 2209 if (ret) 2210 return ret; 2211 2212 #ifndef CONFIG_SPL_BUILD 2213 print_mmc_devices(','); 2214 #endif 2215 2216 mmc_do_preinit(); 2217 return 0; 2218 } 2219 2220 #ifdef CONFIG_CMD_BKOPS_ENABLE 2221 int mmc_set_bkops_enable(struct mmc *mmc) 2222 { 2223 int err; 2224 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 2225 2226 err = mmc_send_ext_csd(mmc, ext_csd); 2227 if (err) { 2228 puts("Could not get ext_csd register values\n"); 2229 return err; 2230 } 2231 2232 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) { 2233 puts("Background operations not supported on device\n"); 2234 return -EMEDIUMTYPE; 2235 } 2236 2237 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) { 2238 puts("Background operations already enabled\n"); 2239 return 0; 2240 } 2241 2242 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1); 2243 if (err) { 2244 puts("Failed to enable manual background operations\n"); 2245 return err; 2246 } 2247 2248 puts("Enabled manual background operations\n"); 2249 2250 return 0; 2251 } 2252 #endif 2253