// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 */

#include <config.h>
#include <common.h>
#include <command.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

/* Forward declarations for helpers defined later in this file */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
static int mmc_power_cycle(struct mmc *mmc);
#if !CONFIG_IS_ENABLED(MMC_TINY)
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Non-DM stub: waiting on DAT0 is a driver-model-only operation, so
 * report "not implemented" and let callers fall back to fixed delays.
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
{
	return -ENOSYS;
}
#endif

/*
 * Board hook for the write-protect state. The default returns -1
 * ("don't know") so mmc_getwp() falls through to the host driver.
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

/*
 * Return the write-protect state of the card: board hook first, then
 * the host controller's getwp op, defaulting to "not protected".
 */
int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

/* Board hook for card-detect; -1 means "board has no opinion". */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif

#ifdef CONFIG_MMC_TRACE
/* Trace helper: dump a command index and argument before it is sent. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
}

/* Trace helper: dump the response (or error) after a command completes. */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
			       cmd->response[0]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[1]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[2]);
			printf("\t\t \t\t 0x%08X \n",
			       cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* R2 is 128 bits: dump each response word byte-wise */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				/* walk the word from MSB to LSB */
				for (j = 0; j < 4; j++)
					printf("%02X ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
			       cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

/* Trace helper: decode the CURRENT_STATE field from a CMD13 response. */
void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	/* CURRENT_STATE occupies bits [12:9] of the card status word */
	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif

#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/* Return a human-readable name for a bus mode, for logging. */
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [SD_LEGACY]	= "SD Legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	      [MMC_HS_400]	= "HS400 (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
#endif

/* Return the nominal clock frequency (Hz) for a bus mode, 0 if unknown. */
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [MMC_LEGACY]	= 25000000,
	      [SD_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
	      [UHS_SDR104]	= 208000000,
	      [MMC_HS_200]	= 200000000,
	      [MMC_HS_400]	= 200000000,
	};

	/* legacy speed is per-card, not a fixed table entry */
	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}

/* Record the chosen bus mode and derived speed/DDR flag in the mmc struct. */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
		 mmc->tran_speed / 1000000);
	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM command dispatch: trace, forward to the host driver, trace result. */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif

/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, or @timeout milliseconds elapse.
 * Up to 5 transport errors are tolerated before giving up.
 */
int mmc_send_status(struct mmc *mmc, int timeout)
{
	struct mmc_cmd cmd;
	int err, retries = 5;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	/* in SPI mode CMD13 takes no RCA argument */
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	while (1) {
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (!err) {
			if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
			    (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
			     MMC_STATE_PRG)
				break;

			/* any error bit in the status word is fatal */
			if (cmd.response[0] & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
				pr_err("Status Error: 0x%08X\n",
				       cmd.response[0]);
#endif
				return -ECOMM;
			}
		} else if (--retries < 0)
			return err;

		if (timeout-- <= 0)
			break;

		udelay(1000);
	}

	mmc_trace_state(mmc, &cmd);
	if (timeout <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Set the card's block length with CMD16. Skipped in DDR mode, where
 * the block length is fixed and SET_BLOCKLEN is illegal.
 */
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;
	int err;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more time
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	return err;
}

#ifdef MMC_SUPPORTS_TUNING
/* Reference tuning block pattern for 4-bit bus width (spec-defined). */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Reference tuning block pattern for 8-bit bus width (spec-defined). */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * Send a tuning command (@opcode) and compare the returned block against
 * the spec-defined pattern for the current bus width. Returns -EIO on
 * data mismatch, -EINVAL for unsupported bus widths.
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
#endif

/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * Multi-block reads are terminated with CMD12. Returns the number of
 * blocks read, or 0 on any failure.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* high-capacity cards address by block, others by byte */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}

/*
 * Block-device read entry point. Validates the range against the device
 * size, selects the hardware partition, then reads in chunks capped at
 * the host's b_max. Returns blocks read, or 0 on error.
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

/* Reset the card to idle state with CMD0, with settling delays around it. */
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD voltage switch sequence (CMD11): request 1.8V signalling,
 * gate the clock, change the regulator, re-enable the clock, and verify
 * the card released DAT[0:3] per the SD spec timing rules.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif

/*
 * SD card power-up: loop ACMD41 until the card leaves busy, negotiating
 * voltage, high-capacity (HCS) and optionally UHS (S18R) support. Sets
 * mmc->version, ocr, high_capacity and rca on success.
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* card accepted 1.8V signalling (S18A + power-up done) */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}

/*
 * Send one CMD1 (SEND_OP_COND) iteration. When @use_arg is set (and not
 * in SPI mode), echo back the negotiated voltage window and access mode
 * from the previously read OCR. Updates mmc->ocr from the response.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

/*
 * Start eMMC power-up negotiation: reset the card, probe its OCR, and
 * mark the operation pending so mmc_complete_op_cond() can finish the
 * busy-polling later (allows parallel init of several controllers).
 */
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Asking to the card its capabilities */
	for (i = 0; i < 2; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;
	}
	mmc->op_cond_pending = 1;
	return 0;
}

/*
 * Finish eMMC power-up: poll CMD1 until the card reports ready (OCR_BUSY
 * set), read the OCR over SPI if needed, and set version/capacity/RCA
 * defaults for the rest of initialization.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}


/* Read the 512-byte Extended CSD register (CMD8) into @ext_csd. */
static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Get the Card Status Register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}

/*
 * Write one EXT_CSD byte with CMD6 (SWITCH), retrying up to 3 times on
 * transport errors. When @send_status is set, poll CMD13 until the card
 * is ready; otherwise just wait 50 ms (used when the card's clock will
 * change and status polling would be unreliable).
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	int retries = 3;
	int ret;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	while (retries > 0) {
		ret = mmc_send_cmd(mmc, &cmd, NULL);

		if (ret) {
			retries--;
			continue;
		}

		if (!send_status) {
			mdelay(50);
			return 0;
		}

		/* Waiting for the ready status */
		return mmc_send_status(mmc, timeout);
	}

	return ret;

}

/* Public CMD6 wrapper that always waits for ready status. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}

#if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the EXT_CSD HS_TIMING field for @mode. @hsdowngrade indicates
 * we are leaving HS200/HS400 for HS: status polling is skipped and the
 * host clock is dropped first so the EXT_CSD readback is reliable.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   speed_bits, !hsdowngrade);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock are still running much faster than
	 * the supported HS mode clock, so we can not reliably read out
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}

/*
 * Derive mmc->card_caps from the EXT_CSD CARD_TYPE field: bus widths
 * plus HS/HS52/DDR52 and (when enabled) HS200/HS400 support.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should enver happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
#endif

/*
 * Set mmc->capacity and the block-device lba for the given hardware
 * partition number (0 = user, 1-2 = boot, 3 = RPMB, 4-7 = GP).
 */
static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
/*
 * Before switching to a boot/RPMB partition, drop out of bus modes that
 * are forbidden for that partition (HS200), or re-optimize if the
 * current mode is not the best one.
 */
static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
{
	int forbidden = 0;
	bool change = false;

	if (part_num & PART_ACCESS_MASK)
		forbidden = MMC_CAP(MMC_HS_200);

	if (MMC_CAP(mmc->selected_mode) & forbidden) {
		pr_debug("selected mode (%s) is forbidden for part %d\n",
			 mmc_mode_name(mmc->selected_mode), part_num);
		change = true;
	} else if (mmc->selected_mode != mmc->best_mode) {
		pr_debug("selected mode is not optimal\n");
		change = true;
	}

	if (change)
		return mmc_select_mode_and_width(mmc,
						 mmc->card_caps & ~forbidden);

	return 0;
}
#else
/* Without HS200 support no mode is ever forbidden for a partition. */
static inline int mmc_boot_part_access_chk(struct mmc *mmc,
					   unsigned int part_num)
{
	return 0;
}
#endif

/*
 * Switch the active hardware partition via EXT_CSD PART_CONF and update
 * the cached capacity/hwpart on success (or on -ENODEV when returning
 * to the raw user area, which some cards report).
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;

	ret = mmc_boot_part_access_chk(mmc, part_num);
	if (ret)
		return ret;

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 (mmc->part_config & ~PART_ACCESS_MASK)
			 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write-reliability). @mode selects validate-only
 * (CHECK), program (SET), or program-and-commit (COMPLETE).
 * Completion is irreversible and only takes effect after a power
 * cycle, per the eMMC spec.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards: convert sectors to bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Return the card-detect state: board hook first, then the host
 * controller's getcd op, defaulting to "present".
 */
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;
	}

	return cd;
}
#endif

#if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Issue CMD6 (SD SWITCH_FUNC) setting function group @group to @value
 * in @mode (check or switch); the 64-byte status block lands in @resp.
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}

static int
sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* every SD card supports at least 1-bit legacy mode */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	timeout = 3;

retry_scr:
	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	if (err) {
		if (timeout--)
			goto retry_scr;

		return err;
	}

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field of the SCR selects the physical-layer version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed isn't supported, we return */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Version before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}

/*
 * Switch the SD card's function group 1 (access mode) to the speed
 * matching @mode via CMD6, then verify the card accepted it.
 * (Continues on the following source chunk.)
 */
static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;

	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	int speed;

	/* SD version 1.00 and 1.01 does not support CMD 6 */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	switch (mode) {
	case SD_LEGACY:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case SD_HS:
		speed = HIGH_SPEED_BUS_SPEED;
		break;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	case UHS_SDR12:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case UHS_SDR25:
		speed = UHS_SDR25_BUS_SPEED;
		break;
	case UHS_SDR50:
		speed = UHS_SDR50_BUS_SPEED;
		break;
	case UHS_DDR50:
		speed = UHS_DDR50_BUS_SPEED;
		break;
	case UHS_SDR104:
		speed = UHS_SDR104_BUS_SPEED;
		break;
#endif
	default:
		return -EINVAL;
	}

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
	if (err)
		return err;

	if
(((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed) 1356 return -ENOTSUPP; 1357 1358 return 0; 1359 } 1360 1361 static int sd_select_bus_width(struct mmc *mmc, int w) 1362 { 1363 int err; 1364 struct mmc_cmd cmd; 1365 1366 if ((w != 4) && (w != 1)) 1367 return -EINVAL; 1368 1369 cmd.cmdidx = MMC_CMD_APP_CMD; 1370 cmd.resp_type = MMC_RSP_R1; 1371 cmd.cmdarg = mmc->rca << 16; 1372 1373 err = mmc_send_cmd(mmc, &cmd, NULL); 1374 if (err) 1375 return err; 1376 1377 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH; 1378 cmd.resp_type = MMC_RSP_R1; 1379 if (w == 4) 1380 cmd.cmdarg = 2; 1381 else if (w == 1) 1382 cmd.cmdarg = 0; 1383 err = mmc_send_cmd(mmc, &cmd, NULL); 1384 if (err) 1385 return err; 1386 1387 return 0; 1388 } 1389 #endif 1390 1391 #if CONFIG_IS_ENABLED(MMC_WRITE) 1392 static int sd_read_ssr(struct mmc *mmc) 1393 { 1394 static const unsigned int sd_au_size[] = { 1395 0, SZ_16K / 512, SZ_32K / 512, 1396 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512, 1397 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512, 1398 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, 1399 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, 1400 SZ_64M / 512, 1401 }; 1402 int err, i; 1403 struct mmc_cmd cmd; 1404 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16); 1405 struct mmc_data data; 1406 int timeout = 3; 1407 unsigned int au, eo, et, es; 1408 1409 cmd.cmdidx = MMC_CMD_APP_CMD; 1410 cmd.resp_type = MMC_RSP_R1; 1411 cmd.cmdarg = mmc->rca << 16; 1412 1413 err = mmc_send_cmd(mmc, &cmd, NULL); 1414 if (err) 1415 return err; 1416 1417 cmd.cmdidx = SD_CMD_APP_SD_STATUS; 1418 cmd.resp_type = MMC_RSP_R1; 1419 cmd.cmdarg = 0; 1420 1421 retry_ssr: 1422 data.dest = (char *)ssr; 1423 data.blocksize = 64; 1424 data.blocks = 1; 1425 data.flags = MMC_DATA_READ; 1426 1427 err = mmc_send_cmd(mmc, &cmd, &data); 1428 if (err) { 1429 if (timeout--) 1430 goto retry_ssr; 1431 1432 return err; 1433 } 1434 1435 for (i = 0; i < 16; i++) 1436 ssr[i] = be32_to_cpu(ssr[i]); 1437 1438 au = (ssr[2] >> 12) & 0xF; 1439 if ((au <= 9) || 
(mmc->version == SD_VERSION_3)) { 1440 mmc->ssr.au = sd_au_size[au]; 1441 es = (ssr[3] >> 24) & 0xFF; 1442 es |= (ssr[2] & 0xFF) << 8; 1443 et = (ssr[3] >> 18) & 0x3F; 1444 if (es && et) { 1445 eo = (ssr[3] >> 16) & 0x3; 1446 mmc->ssr.erase_timeout = (et * 1000) / es; 1447 mmc->ssr.erase_offset = eo * 1000; 1448 } 1449 } else { 1450 pr_debug("Invalid Allocation Unit Size.\n"); 1451 } 1452 1453 return 0; 1454 } 1455 #endif 1456 /* frequency bases */ 1457 /* divided by 10 to be nice to platforms without floating point */ 1458 static const int fbase[] = { 1459 10000, 1460 100000, 1461 1000000, 1462 10000000, 1463 }; 1464 1465 /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice 1466 * to platforms without floating point. 1467 */ 1468 static const u8 multipliers[] = { 1469 0, /* reserved */ 1470 10, 1471 12, 1472 13, 1473 15, 1474 20, 1475 25, 1476 30, 1477 35, 1478 40, 1479 45, 1480 50, 1481 55, 1482 60, 1483 70, 1484 80, 1485 }; 1486 1487 static inline int bus_width(uint cap) 1488 { 1489 if (cap == MMC_MODE_8BIT) 1490 return 8; 1491 if (cap == MMC_MODE_4BIT) 1492 return 4; 1493 if (cap == MMC_MODE_1BIT) 1494 return 1; 1495 pr_warn("invalid bus witdh capability 0x%x\n", cap); 1496 return 0; 1497 } 1498 1499 #if !CONFIG_IS_ENABLED(DM_MMC) 1500 #ifdef MMC_SUPPORTS_TUNING 1501 static int mmc_execute_tuning(struct mmc *mmc, uint opcode) 1502 { 1503 return -ENOTSUPP; 1504 } 1505 #endif 1506 1507 static void mmc_send_init_stream(struct mmc *mmc) 1508 { 1509 } 1510 1511 static int mmc_set_ios(struct mmc *mmc) 1512 { 1513 int ret = 0; 1514 1515 if (mmc->cfg->ops->set_ios) 1516 ret = mmc->cfg->ops->set_ios(mmc); 1517 1518 return ret; 1519 } 1520 #endif 1521 1522 int mmc_set_clock(struct mmc *mmc, uint clock, bool disable) 1523 { 1524 if (!disable) { 1525 if (clock > mmc->cfg->f_max) 1526 clock = mmc->cfg->f_max; 1527 1528 if (clock < mmc->cfg->f_min) 1529 clock = mmc->cfg->f_min; 1530 } 1531 1532 mmc->clock = clock; 1533 mmc->clk_disable = disable; 1534 1535 
debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock); 1536 1537 return mmc_set_ios(mmc); 1538 } 1539 1540 static int mmc_set_bus_width(struct mmc *mmc, uint width) 1541 { 1542 mmc->bus_width = width; 1543 1544 return mmc_set_ios(mmc); 1545 } 1546 1547 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) 1548 /* 1549 * helper function to display the capabilities in a human 1550 * friendly manner. The capabilities include bus width and 1551 * supported modes. 1552 */ 1553 void mmc_dump_capabilities(const char *text, uint caps) 1554 { 1555 enum bus_mode mode; 1556 1557 pr_debug("%s: widths [", text); 1558 if (caps & MMC_MODE_8BIT) 1559 pr_debug("8, "); 1560 if (caps & MMC_MODE_4BIT) 1561 pr_debug("4, "); 1562 if (caps & MMC_MODE_1BIT) 1563 pr_debug("1, "); 1564 pr_debug("\b\b] modes ["); 1565 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++) 1566 if (MMC_CAP(mode) & caps) 1567 pr_debug("%s, ", mmc_mode_name(mode)); 1568 pr_debug("\b\b]\n"); 1569 } 1570 #endif 1571 1572 struct mode_width_tuning { 1573 enum bus_mode mode; 1574 uint widths; 1575 #ifdef MMC_SUPPORTS_TUNING 1576 uint tuning; 1577 #endif 1578 }; 1579 1580 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE) 1581 int mmc_voltage_to_mv(enum mmc_voltage voltage) 1582 { 1583 switch (voltage) { 1584 case MMC_SIGNAL_VOLTAGE_000: return 0; 1585 case MMC_SIGNAL_VOLTAGE_330: return 3300; 1586 case MMC_SIGNAL_VOLTAGE_180: return 1800; 1587 case MMC_SIGNAL_VOLTAGE_120: return 1200; 1588 } 1589 return -EINVAL; 1590 } 1591 1592 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage) 1593 { 1594 int err; 1595 1596 if (mmc->signal_voltage == signal_voltage) 1597 return 0; 1598 1599 mmc->signal_voltage = signal_voltage; 1600 err = mmc_set_ios(mmc); 1601 if (err) 1602 pr_debug("unable to set voltage (err %d)\n", err); 1603 1604 return err; 1605 } 1606 #else 1607 static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage) 1608 { 1609 return 0; 1610 } 1611 #endif 1612 1613 #if 
!CONFIG_IS_ENABLED(MMC_TINY) 1614 static const struct mode_width_tuning sd_modes_by_pref[] = { 1615 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) 1616 #ifdef MMC_SUPPORTS_TUNING 1617 { 1618 .mode = UHS_SDR104, 1619 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1620 .tuning = MMC_CMD_SEND_TUNING_BLOCK 1621 }, 1622 #endif 1623 { 1624 .mode = UHS_SDR50, 1625 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1626 }, 1627 { 1628 .mode = UHS_DDR50, 1629 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1630 }, 1631 { 1632 .mode = UHS_SDR25, 1633 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1634 }, 1635 #endif 1636 { 1637 .mode = SD_HS, 1638 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1639 }, 1640 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) 1641 { 1642 .mode = UHS_SDR12, 1643 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1644 }, 1645 #endif 1646 { 1647 .mode = SD_LEGACY, 1648 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT, 1649 } 1650 }; 1651 1652 #define for_each_sd_mode_by_pref(caps, mwt) \ 1653 for (mwt = sd_modes_by_pref;\ 1654 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\ 1655 mwt++) \ 1656 if (caps & MMC_CAP(mwt->mode)) 1657 1658 static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps) 1659 { 1660 int err; 1661 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT}; 1662 const struct mode_width_tuning *mwt; 1663 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) 1664 bool uhs_en = (mmc->ocr & OCR_S18R) ? 
true : false; 1665 #else 1666 bool uhs_en = false; 1667 #endif 1668 uint caps; 1669 1670 #ifdef DEBUG 1671 mmc_dump_capabilities("sd card", card_caps); 1672 mmc_dump_capabilities("host", mmc->host_caps); 1673 #endif 1674 1675 /* Restrict card's capabilities by what the host can do */ 1676 caps = card_caps & mmc->host_caps; 1677 1678 if (!uhs_en) 1679 caps &= ~UHS_CAPS; 1680 1681 for_each_sd_mode_by_pref(caps, mwt) { 1682 uint *w; 1683 1684 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) { 1685 if (*w & caps & mwt->widths) { 1686 pr_debug("trying mode %s width %d (at %d MHz)\n", 1687 mmc_mode_name(mwt->mode), 1688 bus_width(*w), 1689 mmc_mode2freq(mmc, mwt->mode) / 1000000); 1690 1691 /* configure the bus width (card + host) */ 1692 err = sd_select_bus_width(mmc, bus_width(*w)); 1693 if (err) 1694 goto error; 1695 mmc_set_bus_width(mmc, bus_width(*w)); 1696 1697 /* configure the bus mode (card) */ 1698 err = sd_set_card_speed(mmc, mwt->mode); 1699 if (err) 1700 goto error; 1701 1702 /* configure the bus mode (host) */ 1703 mmc_select_mode(mmc, mwt->mode); 1704 mmc_set_clock(mmc, mmc->tran_speed, 1705 MMC_CLK_ENABLE); 1706 1707 #ifdef MMC_SUPPORTS_TUNING 1708 /* execute tuning if needed */ 1709 if (mwt->tuning && !mmc_host_is_spi(mmc)) { 1710 err = mmc_execute_tuning(mmc, 1711 mwt->tuning); 1712 if (err) { 1713 pr_debug("tuning failed\n"); 1714 goto error; 1715 } 1716 } 1717 #endif 1718 1719 #if CONFIG_IS_ENABLED(MMC_WRITE) 1720 err = sd_read_ssr(mmc); 1721 if (err) 1722 pr_warn("unable to read ssr\n"); 1723 #endif 1724 if (!err) 1725 return 0; 1726 1727 error: 1728 /* revert to a safer bus speed */ 1729 mmc_select_mode(mmc, SD_LEGACY); 1730 mmc_set_clock(mmc, mmc->tran_speed, 1731 MMC_CLK_ENABLE); 1732 } 1733 } 1734 } 1735 1736 pr_err("unable to select a mode\n"); 1737 return -ENOTSUPP; 1738 } 1739 1740 /* 1741 * read the compare the part of ext csd that is constant. 1742 * This can be used to check that the transfer is working 1743 * as expected. 
1744 */ 1745 static int mmc_read_and_compare_ext_csd(struct mmc *mmc) 1746 { 1747 int err; 1748 const u8 *ext_csd = mmc->ext_csd; 1749 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN); 1750 1751 if (mmc->version < MMC_VERSION_4) 1752 return 0; 1753 1754 err = mmc_send_ext_csd(mmc, test_csd); 1755 if (err) 1756 return err; 1757 1758 /* Only compare read only fields */ 1759 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] 1760 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] && 1761 ext_csd[EXT_CSD_HC_WP_GRP_SIZE] 1762 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] && 1763 ext_csd[EXT_CSD_REV] 1764 == test_csd[EXT_CSD_REV] && 1765 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 1766 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] && 1767 memcmp(&ext_csd[EXT_CSD_SEC_CNT], 1768 &test_csd[EXT_CSD_SEC_CNT], 4) == 0) 1769 return 0; 1770 1771 return -EBADMSG; 1772 } 1773 1774 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE) 1775 static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode, 1776 uint32_t allowed_mask) 1777 { 1778 u32 card_mask = 0; 1779 1780 switch (mode) { 1781 case MMC_HS_400: 1782 case MMC_HS_200: 1783 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V | 1784 EXT_CSD_CARD_TYPE_HS400_1_8V)) 1785 card_mask |= MMC_SIGNAL_VOLTAGE_180; 1786 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V | 1787 EXT_CSD_CARD_TYPE_HS400_1_2V)) 1788 card_mask |= MMC_SIGNAL_VOLTAGE_120; 1789 break; 1790 case MMC_DDR_52: 1791 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V) 1792 card_mask |= MMC_SIGNAL_VOLTAGE_330 | 1793 MMC_SIGNAL_VOLTAGE_180; 1794 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V) 1795 card_mask |= MMC_SIGNAL_VOLTAGE_120; 1796 break; 1797 default: 1798 card_mask |= MMC_SIGNAL_VOLTAGE_330; 1799 break; 1800 } 1801 1802 while (card_mask & allowed_mask) { 1803 enum mmc_voltage best_match; 1804 1805 best_match = 1 << (ffs(card_mask & allowed_mask) - 1); 1806 if (!mmc_set_signal_voltage(mmc, best_match)) 1807 return 0; 1808 1809 allowed_mask &= ~best_match; 1810 } 1811 1812 return -ENOTSUPP; 1813 } 1814 #else 
1815 static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode, 1816 uint32_t allowed_mask) 1817 { 1818 return 0; 1819 } 1820 #endif 1821 1822 static const struct mode_width_tuning mmc_modes_by_pref[] = { 1823 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) 1824 { 1825 .mode = MMC_HS_400, 1826 .widths = MMC_MODE_8BIT, 1827 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200 1828 }, 1829 #endif 1830 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) 1831 { 1832 .mode = MMC_HS_200, 1833 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT, 1834 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200 1835 }, 1836 #endif 1837 { 1838 .mode = MMC_DDR_52, 1839 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT, 1840 }, 1841 { 1842 .mode = MMC_HS_52, 1843 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT, 1844 }, 1845 { 1846 .mode = MMC_HS, 1847 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT, 1848 }, 1849 { 1850 .mode = MMC_LEGACY, 1851 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT, 1852 } 1853 }; 1854 1855 #define for_each_mmc_mode_by_pref(caps, mwt) \ 1856 for (mwt = mmc_modes_by_pref;\ 1857 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\ 1858 mwt++) \ 1859 if (caps & MMC_CAP(mwt->mode)) 1860 1861 static const struct ext_csd_bus_width { 1862 uint cap; 1863 bool is_ddr; 1864 uint ext_csd_bits; 1865 } ext_csd_bus_width[] = { 1866 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8}, 1867 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4}, 1868 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8}, 1869 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4}, 1870 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1}, 1871 }; 1872 1873 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) 1874 static int mmc_select_hs400(struct mmc *mmc) 1875 { 1876 int err; 1877 1878 /* Set timing to HS200 for tuning */ 1879 err = mmc_set_card_speed(mmc, MMC_HS_200, false); 1880 if (err) 1881 return err; 1882 1883 /* configure the bus mode (host) */ 1884 mmc_select_mode(mmc, MMC_HS_200); 1885 mmc_set_clock(mmc, mmc->tran_speed, false); 1886 1887 /* 
execute tuning if needed */ 1888 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200); 1889 if (err) { 1890 debug("tuning failed\n"); 1891 return err; 1892 } 1893 1894 /* Set back to HS */ 1895 mmc_set_card_speed(mmc, MMC_HS, false); 1896 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false); 1897 1898 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, 1899 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG); 1900 if (err) 1901 return err; 1902 1903 err = mmc_set_card_speed(mmc, MMC_HS_400, false); 1904 if (err) 1905 return err; 1906 1907 mmc_select_mode(mmc, MMC_HS_400); 1908 err = mmc_set_clock(mmc, mmc->tran_speed, false); 1909 if (err) 1910 return err; 1911 1912 return 0; 1913 } 1914 #else 1915 static int mmc_select_hs400(struct mmc *mmc) 1916 { 1917 return -ENOTSUPP; 1918 } 1919 #endif 1920 1921 #define for_each_supported_width(caps, ddr, ecbv) \ 1922 for (ecbv = ext_csd_bus_width;\ 1923 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\ 1924 ecbv++) \ 1925 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap)) 1926 1927 static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps) 1928 { 1929 int err; 1930 const struct mode_width_tuning *mwt; 1931 const struct ext_csd_bus_width *ecbw; 1932 1933 #ifdef DEBUG 1934 mmc_dump_capabilities("mmc", card_caps); 1935 mmc_dump_capabilities("host", mmc->host_caps); 1936 #endif 1937 1938 /* Restrict card's capabilities by what the host can do */ 1939 card_caps &= mmc->host_caps; 1940 1941 /* Only version 4 of MMC supports wider bus widths */ 1942 if (mmc->version < MMC_VERSION_4) 1943 return 0; 1944 1945 if (!mmc->ext_csd) { 1946 pr_debug("No ext_csd found!\n"); /* this should enver happen */ 1947 return -ENOTSUPP; 1948 } 1949 1950 #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \ 1951 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) 1952 /* 1953 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode 1954 * before doing anything else, since a transition from either of 1955 * the HS200/HS400 mode directly to 
legacy mode is not supported. 1956 */ 1957 if (mmc->selected_mode == MMC_HS_200 || 1958 mmc->selected_mode == MMC_HS_400) 1959 mmc_set_card_speed(mmc, MMC_HS, true); 1960 else 1961 #endif 1962 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE); 1963 1964 for_each_mmc_mode_by_pref(card_caps, mwt) { 1965 for_each_supported_width(card_caps & mwt->widths, 1966 mmc_is_mode_ddr(mwt->mode), ecbw) { 1967 enum mmc_voltage old_voltage; 1968 pr_debug("trying mode %s width %d (at %d MHz)\n", 1969 mmc_mode_name(mwt->mode), 1970 bus_width(ecbw->cap), 1971 mmc_mode2freq(mmc, mwt->mode) / 1000000); 1972 old_voltage = mmc->signal_voltage; 1973 err = mmc_set_lowest_voltage(mmc, mwt->mode, 1974 MMC_ALL_SIGNAL_VOLTAGE); 1975 if (err) 1976 continue; 1977 1978 /* configure the bus width (card + host) */ 1979 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 1980 EXT_CSD_BUS_WIDTH, 1981 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG); 1982 if (err) 1983 goto error; 1984 mmc_set_bus_width(mmc, bus_width(ecbw->cap)); 1985 1986 if (mwt->mode == MMC_HS_400) { 1987 err = mmc_select_hs400(mmc); 1988 if (err) { 1989 printf("Select HS400 failed %d\n", err); 1990 goto error; 1991 } 1992 } else { 1993 /* configure the bus speed (card) */ 1994 err = mmc_set_card_speed(mmc, mwt->mode, false); 1995 if (err) 1996 goto error; 1997 1998 /* 1999 * configure the bus width AND the ddr mode 2000 * (card). 
The host side will be taken care 2001 * of in the next step 2002 */ 2003 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) { 2004 err = mmc_switch(mmc, 2005 EXT_CSD_CMD_SET_NORMAL, 2006 EXT_CSD_BUS_WIDTH, 2007 ecbw->ext_csd_bits); 2008 if (err) 2009 goto error; 2010 } 2011 2012 /* configure the bus mode (host) */ 2013 mmc_select_mode(mmc, mwt->mode); 2014 mmc_set_clock(mmc, mmc->tran_speed, 2015 MMC_CLK_ENABLE); 2016 #ifdef MMC_SUPPORTS_TUNING 2017 2018 /* execute tuning if needed */ 2019 if (mwt->tuning) { 2020 err = mmc_execute_tuning(mmc, 2021 mwt->tuning); 2022 if (err) { 2023 pr_debug("tuning failed\n"); 2024 goto error; 2025 } 2026 } 2027 #endif 2028 } 2029 2030 /* do a transfer to check the configuration */ 2031 err = mmc_read_and_compare_ext_csd(mmc); 2032 if (!err) 2033 return 0; 2034 error: 2035 mmc_set_signal_voltage(mmc, old_voltage); 2036 /* if an error occured, revert to a safer bus mode */ 2037 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 2038 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1); 2039 mmc_select_mode(mmc, MMC_LEGACY); 2040 mmc_set_bus_width(mmc, 1); 2041 } 2042 } 2043 2044 pr_err("unable to select a mode\n"); 2045 2046 return -ENOTSUPP; 2047 } 2048 #endif 2049 2050 #if CONFIG_IS_ENABLED(MMC_TINY) 2051 DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN); 2052 #endif 2053 2054 static int mmc_startup_v4(struct mmc *mmc) 2055 { 2056 int err, i; 2057 u64 capacity; 2058 bool has_parts = false; 2059 bool part_completed; 2060 static const u32 mmc_versions[] = { 2061 MMC_VERSION_4, 2062 MMC_VERSION_4_1, 2063 MMC_VERSION_4_2, 2064 MMC_VERSION_4_3, 2065 MMC_VERSION_4_4, 2066 MMC_VERSION_4_41, 2067 MMC_VERSION_4_5, 2068 MMC_VERSION_5_0, 2069 MMC_VERSION_5_1 2070 }; 2071 2072 #if CONFIG_IS_ENABLED(MMC_TINY) 2073 u8 *ext_csd = ext_csd_bkup; 2074 2075 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4) 2076 return 0; 2077 2078 if (!mmc->ext_csd) 2079 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup)); 2080 2081 err = mmc_send_ext_csd(mmc, ext_csd); 2082 if (err) 2083 
goto error; 2084 2085 /* store the ext csd for future reference */ 2086 if (!mmc->ext_csd) 2087 mmc->ext_csd = ext_csd; 2088 #else 2089 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 2090 2091 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4)) 2092 return 0; 2093 2094 /* check ext_csd version and capacity */ 2095 err = mmc_send_ext_csd(mmc, ext_csd); 2096 if (err) 2097 goto error; 2098 2099 /* store the ext csd for future reference */ 2100 if (!mmc->ext_csd) 2101 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN); 2102 if (!mmc->ext_csd) 2103 return -ENOMEM; 2104 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN); 2105 #endif 2106 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions)) 2107 return -EINVAL; 2108 2109 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]]; 2110 2111 if (mmc->version >= MMC_VERSION_4_2) { 2112 /* 2113 * According to the JEDEC Standard, the value of 2114 * ext_csd's capacity is valid if the value is more 2115 * than 2GB 2116 */ 2117 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0 2118 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8 2119 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 2120 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; 2121 capacity *= MMC_MAX_BLOCK_LEN; 2122 if ((capacity >> 20) > 2 * 1024) 2123 mmc->capacity_user = capacity; 2124 } 2125 2126 /* The partition data may be non-zero but it is only 2127 * effective if PARTITION_SETTING_COMPLETED is set in 2128 * EXT_CSD, so ignore any data if this bit is not set, 2129 * except for enabling the high-capacity group size 2130 * definition (see below). 
2131 */ 2132 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] & 2133 EXT_CSD_PARTITION_SETTING_COMPLETED); 2134 2135 /* store the partition info of emmc */ 2136 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT]; 2137 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) || 2138 ext_csd[EXT_CSD_BOOT_MULT]) 2139 mmc->part_config = ext_csd[EXT_CSD_PART_CONF]; 2140 if (part_completed && 2141 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT)) 2142 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE]; 2143 2144 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17; 2145 2146 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17; 2147 2148 for (i = 0; i < 4; i++) { 2149 int idx = EXT_CSD_GP_SIZE_MULT + i * 3; 2150 uint mult = (ext_csd[idx + 2] << 16) + 2151 (ext_csd[idx + 1] << 8) + ext_csd[idx]; 2152 if (mult) 2153 has_parts = true; 2154 if (!part_completed) 2155 continue; 2156 mmc->capacity_gp[i] = mult; 2157 mmc->capacity_gp[i] *= 2158 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 2159 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 2160 mmc->capacity_gp[i] <<= 19; 2161 } 2162 2163 #ifndef CONFIG_SPL_BUILD 2164 if (part_completed) { 2165 mmc->enh_user_size = 2166 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) + 2167 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) + 2168 ext_csd[EXT_CSD_ENH_SIZE_MULT]; 2169 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; 2170 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 2171 mmc->enh_user_size <<= 19; 2172 mmc->enh_user_start = 2173 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) + 2174 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) + 2175 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) + 2176 ext_csd[EXT_CSD_ENH_START_ADDR]; 2177 if (mmc->high_capacity) 2178 mmc->enh_user_start <<= 9; 2179 } 2180 #endif 2181 2182 /* 2183 * Host needs to enable ERASE_GRP_DEF bit if device is 2184 * partitioned. This bit will be lost every time after a reset 2185 * or power off. This will affect erase size. 
2186 */ 2187 if (part_completed) 2188 has_parts = true; 2189 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) && 2190 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB)) 2191 has_parts = true; 2192 if (has_parts) { 2193 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, 2194 EXT_CSD_ERASE_GROUP_DEF, 1); 2195 2196 if (err) 2197 goto error; 2198 2199 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1; 2200 } 2201 2202 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) { 2203 #if CONFIG_IS_ENABLED(MMC_WRITE) 2204 /* Read out group size from ext_csd */ 2205 mmc->erase_grp_size = 2206 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024; 2207 #endif 2208 /* 2209 * if high capacity and partition setting completed 2210 * SEC_COUNT is valid even if it is smaller than 2 GiB 2211 * JEDEC Standard JESD84-B45, 6.2.4 2212 */ 2213 if (mmc->high_capacity && part_completed) { 2214 capacity = (ext_csd[EXT_CSD_SEC_CNT]) | 2215 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) | 2216 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) | 2217 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24); 2218 capacity *= MMC_MAX_BLOCK_LEN; 2219 mmc->capacity_user = capacity; 2220 } 2221 } 2222 #if CONFIG_IS_ENABLED(MMC_WRITE) 2223 else { 2224 /* Calculate the group size from the csd value. 
*/ 2225 int erase_gsz, erase_gmul; 2226 2227 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10; 2228 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5; 2229 mmc->erase_grp_size = (erase_gsz + 1) 2230 * (erase_gmul + 1); 2231 } 2232 #endif 2233 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING) 2234 mmc->hc_wp_grp_size = 1024 2235 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] 2236 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; 2237 #endif 2238 2239 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET]; 2240 2241 return 0; 2242 error: 2243 if (mmc->ext_csd) { 2244 #if !CONFIG_IS_ENABLED(MMC_TINY) 2245 free(mmc->ext_csd); 2246 #endif 2247 mmc->ext_csd = NULL; 2248 } 2249 return err; 2250 } 2251 2252 static int mmc_startup(struct mmc *mmc) 2253 { 2254 int err, i; 2255 uint mult, freq; 2256 u64 cmult, csize; 2257 struct mmc_cmd cmd; 2258 struct blk_desc *bdesc; 2259 2260 #ifdef CONFIG_MMC_SPI_CRC_ON 2261 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */ 2262 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF; 2263 cmd.resp_type = MMC_RSP_R1; 2264 cmd.cmdarg = 1; 2265 err = mmc_send_cmd(mmc, &cmd, NULL); 2266 if (err) 2267 return err; 2268 } 2269 #endif 2270 2271 /* Put the Card in Identify Mode */ 2272 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID : 2273 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */ 2274 cmd.resp_type = MMC_RSP_R2; 2275 cmd.cmdarg = 0; 2276 2277 err = mmc_send_cmd(mmc, &cmd, NULL); 2278 2279 #ifdef CONFIG_MMC_QUIRKS 2280 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) { 2281 int retries = 4; 2282 /* 2283 * It has been seen that SEND_CID may fail on the first 2284 * attempt, let's try a few more time 2285 */ 2286 do { 2287 err = mmc_send_cmd(mmc, &cmd, NULL); 2288 if (!err) 2289 break; 2290 } while (retries--); 2291 } 2292 #endif 2293 2294 if (err) 2295 return err; 2296 2297 memcpy(mmc->cid, cmd.response, 16); 2298 2299 /* 2300 * For MMC cards, set the Relative Address. 2301 * For SD cards, get the Relatvie Address. 
2302 * This also puts the cards into Standby State 2303 */ 2304 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 2305 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR; 2306 cmd.cmdarg = mmc->rca << 16; 2307 cmd.resp_type = MMC_RSP_R6; 2308 2309 err = mmc_send_cmd(mmc, &cmd, NULL); 2310 2311 if (err) 2312 return err; 2313 2314 if (IS_SD(mmc)) 2315 mmc->rca = (cmd.response[0] >> 16) & 0xffff; 2316 } 2317 2318 /* Get the Card-Specific Data */ 2319 cmd.cmdidx = MMC_CMD_SEND_CSD; 2320 cmd.resp_type = MMC_RSP_R2; 2321 cmd.cmdarg = mmc->rca << 16; 2322 2323 err = mmc_send_cmd(mmc, &cmd, NULL); 2324 2325 if (err) 2326 return err; 2327 2328 mmc->csd[0] = cmd.response[0]; 2329 mmc->csd[1] = cmd.response[1]; 2330 mmc->csd[2] = cmd.response[2]; 2331 mmc->csd[3] = cmd.response[3]; 2332 2333 if (mmc->version == MMC_VERSION_UNKNOWN) { 2334 int version = (cmd.response[0] >> 26) & 0xf; 2335 2336 switch (version) { 2337 case 0: 2338 mmc->version = MMC_VERSION_1_2; 2339 break; 2340 case 1: 2341 mmc->version = MMC_VERSION_1_4; 2342 break; 2343 case 2: 2344 mmc->version = MMC_VERSION_2_2; 2345 break; 2346 case 3: 2347 mmc->version = MMC_VERSION_3; 2348 break; 2349 case 4: 2350 mmc->version = MMC_VERSION_4; 2351 break; 2352 default: 2353 mmc->version = MMC_VERSION_1_2; 2354 break; 2355 } 2356 } 2357 2358 /* divide frequency by 10, since the mults are 10x bigger */ 2359 freq = fbase[(cmd.response[0] & 0x7)]; 2360 mult = multipliers[((cmd.response[0] >> 3) & 0xf)]; 2361 2362 mmc->legacy_speed = freq * mult; 2363 mmc_select_mode(mmc, MMC_LEGACY); 2364 2365 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1); 2366 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf); 2367 #if CONFIG_IS_ENABLED(MMC_WRITE) 2368 2369 if (IS_SD(mmc)) 2370 mmc->write_bl_len = mmc->read_bl_len; 2371 else 2372 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf); 2373 #endif 2374 2375 if (mmc->high_capacity) { 2376 csize = (mmc->csd[1] & 0x3f) << 16 2377 | (mmc->csd[2] & 0xffff0000) >> 16; 2378 cmult = 8; 2379 
} else { 2380 csize = (mmc->csd[1] & 0x3ff) << 2 2381 | (mmc->csd[2] & 0xc0000000) >> 30; 2382 cmult = (mmc->csd[2] & 0x00038000) >> 15; 2383 } 2384 2385 mmc->capacity_user = (csize + 1) << (cmult + 2); 2386 mmc->capacity_user *= mmc->read_bl_len; 2387 mmc->capacity_boot = 0; 2388 mmc->capacity_rpmb = 0; 2389 for (i = 0; i < 4; i++) 2390 mmc->capacity_gp[i] = 0; 2391 2392 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN) 2393 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 2394 2395 #if CONFIG_IS_ENABLED(MMC_WRITE) 2396 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN) 2397 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 2398 #endif 2399 2400 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) { 2401 cmd.cmdidx = MMC_CMD_SET_DSR; 2402 cmd.cmdarg = (mmc->dsr & 0xffff) << 16; 2403 cmd.resp_type = MMC_RSP_NONE; 2404 if (mmc_send_cmd(mmc, &cmd, NULL)) 2405 pr_warn("MMC: SET_DSR failed\n"); 2406 } 2407 2408 /* Select the card, and put it into Transfer Mode */ 2409 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */ 2410 cmd.cmdidx = MMC_CMD_SELECT_CARD; 2411 cmd.resp_type = MMC_RSP_R1; 2412 cmd.cmdarg = mmc->rca << 16; 2413 err = mmc_send_cmd(mmc, &cmd, NULL); 2414 2415 if (err) 2416 return err; 2417 } 2418 2419 /* 2420 * For SD, its erase group is always one sector 2421 */ 2422 #if CONFIG_IS_ENABLED(MMC_WRITE) 2423 mmc->erase_grp_size = 1; 2424 #endif 2425 mmc->part_config = MMCPART_NOAVAILABLE; 2426 2427 err = mmc_startup_v4(mmc); 2428 if (err) 2429 return err; 2430 2431 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart); 2432 if (err) 2433 return err; 2434 2435 #if CONFIG_IS_ENABLED(MMC_TINY) 2436 mmc_set_clock(mmc, mmc->legacy_speed, false); 2437 mmc_select_mode(mmc, IS_SD(mmc) ? 
SD_LEGACY : MMC_LEGACY); 2438 mmc_set_bus_width(mmc, 1); 2439 #else 2440 if (IS_SD(mmc)) { 2441 err = sd_get_capabilities(mmc); 2442 if (err) 2443 return err; 2444 err = sd_select_mode_and_width(mmc, mmc->card_caps); 2445 } else { 2446 err = mmc_get_capabilities(mmc); 2447 if (err) 2448 return err; 2449 mmc_select_mode_and_width(mmc, mmc->card_caps); 2450 } 2451 #endif 2452 if (err) 2453 return err; 2454 2455 mmc->best_mode = mmc->selected_mode; 2456 2457 /* Fix the block length for DDR mode */ 2458 if (mmc->ddr_mode) { 2459 mmc->read_bl_len = MMC_MAX_BLOCK_LEN; 2460 #if CONFIG_IS_ENABLED(MMC_WRITE) 2461 mmc->write_bl_len = MMC_MAX_BLOCK_LEN; 2462 #endif 2463 } 2464 2465 /* fill in device description */ 2466 bdesc = mmc_get_blk_desc(mmc); 2467 bdesc->lun = 0; 2468 bdesc->hwpart = 0; 2469 bdesc->type = 0; 2470 bdesc->blksz = mmc->read_bl_len; 2471 bdesc->log2blksz = LOG2(bdesc->blksz); 2472 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len); 2473 #if !defined(CONFIG_SPL_BUILD) || \ 2474 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \ 2475 !defined(CONFIG_USE_TINY_PRINTF)) 2476 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x", 2477 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff), 2478 (mmc->cid[3] >> 16) & 0xffff); 2479 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff, 2480 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff, 2481 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff, 2482 (mmc->cid[2] >> 24) & 0xff); 2483 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf, 2484 (mmc->cid[2] >> 16) & 0xf); 2485 #else 2486 bdesc->vendor[0] = 0; 2487 bdesc->product[0] = 0; 2488 bdesc->revision[0] = 0; 2489 #endif 2490 2491 #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)) 2492 part_init(bdesc); 2493 #endif 2494 2495 return 0; 2496 } 2497 2498 static int mmc_send_if_cond(struct mmc *mmc) 2499 { 2500 struct mmc_cmd cmd; 2501 int err; 2502 2503 cmd.cmdidx = SD_CMD_SEND_IF_COND; 2504 /* We set the bit if the host supports 
voltages between 2.7 and 3.6 V */ 2505 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa; 2506 cmd.resp_type = MMC_RSP_R7; 2507 2508 err = mmc_send_cmd(mmc, &cmd, NULL); 2509 2510 if (err) 2511 return err; 2512 2513 if ((cmd.response[0] & 0xff) != 0xaa) 2514 return -EOPNOTSUPP; 2515 else 2516 mmc->version = SD_VERSION_2; 2517 2518 return 0; 2519 } 2520 2521 #if !CONFIG_IS_ENABLED(DM_MMC) 2522 /* board-specific MMC power initializations. */ 2523 __weak void board_mmc_power_init(void) 2524 { 2525 } 2526 #endif 2527 2528 static int mmc_power_init(struct mmc *mmc) 2529 { 2530 #if CONFIG_IS_ENABLED(DM_MMC) 2531 #if CONFIG_IS_ENABLED(DM_REGULATOR) 2532 int ret; 2533 2534 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply", 2535 &mmc->vmmc_supply); 2536 if (ret) 2537 pr_debug("%s: No vmmc supply\n", mmc->dev->name); 2538 2539 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply", 2540 &mmc->vqmmc_supply); 2541 if (ret) 2542 pr_debug("%s: No vqmmc supply\n", mmc->dev->name); 2543 #endif 2544 #else /* !CONFIG_DM_MMC */ 2545 /* 2546 * Driver model should use a regulator, as above, rather than calling 2547 * out to board code. 2548 */ 2549 board_mmc_power_init(); 2550 #endif 2551 return 0; 2552 } 2553 2554 /* 2555 * put the host in the initial state: 2556 * - turn on Vdd (card power supply) 2557 * - configure the bus width and clock to minimal values 2558 */ 2559 static void mmc_set_initial_state(struct mmc *mmc) 2560 { 2561 int err; 2562 2563 /* First try to set 3.3V. 
If it fails set to 1.8V */ 2564 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330); 2565 if (err != 0) 2566 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180); 2567 if (err != 0) 2568 pr_warn("mmc: failed to set signal voltage\n"); 2569 2570 mmc_select_mode(mmc, MMC_LEGACY); 2571 mmc_set_bus_width(mmc, 1); 2572 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE); 2573 } 2574 2575 static int mmc_power_on(struct mmc *mmc) 2576 { 2577 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR) 2578 if (mmc->vmmc_supply) { 2579 int ret = regulator_set_enable(mmc->vmmc_supply, true); 2580 2581 if (ret) { 2582 puts("Error enabling VMMC supply\n"); 2583 return ret; 2584 } 2585 } 2586 #endif 2587 return 0; 2588 } 2589 2590 static int mmc_power_off(struct mmc *mmc) 2591 { 2592 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE); 2593 #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR) 2594 if (mmc->vmmc_supply) { 2595 int ret = regulator_set_enable(mmc->vmmc_supply, false); 2596 2597 if (ret) { 2598 pr_debug("Error disabling VMMC supply\n"); 2599 return ret; 2600 } 2601 } 2602 #endif 2603 return 0; 2604 } 2605 2606 static int mmc_power_cycle(struct mmc *mmc) 2607 { 2608 int ret; 2609 2610 ret = mmc_power_off(mmc); 2611 if (ret) 2612 return ret; 2613 /* 2614 * SD spec recommends at least 1ms of delay. Let's wait for 2ms 2615 * to be on the safer side. 
2616 */ 2617 udelay(2000); 2618 return mmc_power_on(mmc); 2619 } 2620 2621 int mmc_get_op_cond(struct mmc *mmc) 2622 { 2623 bool uhs_en = supports_uhs(mmc->cfg->host_caps); 2624 int err; 2625 2626 if (mmc->has_init) 2627 return 0; 2628 2629 #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT 2630 mmc_adapter_card_type_ident(); 2631 #endif 2632 err = mmc_power_init(mmc); 2633 if (err) 2634 return err; 2635 2636 #ifdef CONFIG_MMC_QUIRKS 2637 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN | 2638 MMC_QUIRK_RETRY_SEND_CID; 2639 #endif 2640 2641 err = mmc_power_cycle(mmc); 2642 if (err) { 2643 /* 2644 * if power cycling is not supported, we should not try 2645 * to use the UHS modes, because we wouldn't be able to 2646 * recover from an error during the UHS initialization. 2647 */ 2648 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n"); 2649 uhs_en = false; 2650 mmc->host_caps &= ~UHS_CAPS; 2651 err = mmc_power_on(mmc); 2652 } 2653 if (err) 2654 return err; 2655 2656 #if CONFIG_IS_ENABLED(DM_MMC) 2657 /* The device has already been probed ready for use */ 2658 #else 2659 /* made sure it's not NULL earlier */ 2660 err = mmc->cfg->ops->init(mmc); 2661 if (err) 2662 return err; 2663 #endif 2664 mmc->ddr_mode = 0; 2665 2666 retry: 2667 mmc_set_initial_state(mmc); 2668 mmc_send_init_stream(mmc); 2669 2670 /* Reset the Card */ 2671 err = mmc_go_idle(mmc); 2672 2673 if (err) 2674 return err; 2675 2676 /* The internal partition reset to user partition(0) at every CMD0*/ 2677 mmc_get_blk_desc(mmc)->hwpart = 0; 2678 2679 /* Test for SD version 2 */ 2680 err = mmc_send_if_cond(mmc); 2681 2682 /* Now try to get the SD card's operating condition */ 2683 err = sd_send_op_cond(mmc, uhs_en); 2684 if (err && uhs_en) { 2685 uhs_en = false; 2686 mmc_power_cycle(mmc); 2687 goto retry; 2688 } 2689 2690 /* If the command timed out, we check for an MMC card */ 2691 if (err == -ETIMEDOUT) { 2692 err = mmc_send_op_cond(mmc); 2693 2694 if (err) { 2695 #if !defined(CONFIG_SPL_BUILD) 
|| defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2696 pr_err("Card did not respond to voltage select!\n"); 2697 #endif 2698 return -EOPNOTSUPP; 2699 } 2700 } 2701 2702 return err; 2703 } 2704 2705 int mmc_start_init(struct mmc *mmc) 2706 { 2707 bool no_card; 2708 int err = 0; 2709 2710 /* 2711 * all hosts are capable of 1 bit bus-width and able to use the legacy 2712 * timings. 2713 */ 2714 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) | 2715 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT; 2716 2717 #if !defined(CONFIG_MMC_BROKEN_CD) 2718 /* we pretend there's no card when init is NULL */ 2719 no_card = mmc_getcd(mmc) == 0; 2720 #else 2721 no_card = 0; 2722 #endif 2723 #if !CONFIG_IS_ENABLED(DM_MMC) 2724 no_card = no_card || (mmc->cfg->ops->init == NULL); 2725 #endif 2726 if (no_card) { 2727 mmc->has_init = 0; 2728 #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT) 2729 pr_err("MMC: no card present\n"); 2730 #endif 2731 return -ENOMEDIUM; 2732 } 2733 2734 err = mmc_get_op_cond(mmc); 2735 2736 if (!err) 2737 mmc->init_in_progress = 1; 2738 2739 return err; 2740 } 2741 2742 static int mmc_complete_init(struct mmc *mmc) 2743 { 2744 int err = 0; 2745 2746 mmc->init_in_progress = 0; 2747 if (mmc->op_cond_pending) 2748 err = mmc_complete_op_cond(mmc); 2749 2750 if (!err) 2751 err = mmc_startup(mmc); 2752 if (err) 2753 mmc->has_init = 0; 2754 else 2755 mmc->has_init = 1; 2756 return err; 2757 } 2758 2759 int mmc_init(struct mmc *mmc) 2760 { 2761 int err = 0; 2762 __maybe_unused ulong start; 2763 #if CONFIG_IS_ENABLED(DM_MMC) 2764 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev); 2765 2766 upriv->mmc = mmc; 2767 #endif 2768 if (mmc->has_init) 2769 return 0; 2770 2771 start = get_timer(0); 2772 2773 if (!mmc->init_in_progress) 2774 err = mmc_start_init(mmc); 2775 2776 if (!err) 2777 err = mmc_complete_init(mmc); 2778 if (err) 2779 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start)); 2780 2781 return err; 2782 } 2783 2784 #if 
CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \ 2785 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \ 2786 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) 2787 int mmc_deinit(struct mmc *mmc) 2788 { 2789 u32 caps_filtered; 2790 2791 if (!mmc->has_init) 2792 return 0; 2793 2794 if (IS_SD(mmc)) { 2795 caps_filtered = mmc->card_caps & 2796 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) | 2797 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) | 2798 MMC_CAP(UHS_SDR104)); 2799 2800 return sd_select_mode_and_width(mmc, caps_filtered); 2801 } else { 2802 caps_filtered = mmc->card_caps & 2803 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400)); 2804 2805 return mmc_select_mode_and_width(mmc, caps_filtered); 2806 } 2807 } 2808 #endif 2809 2810 int mmc_set_dsr(struct mmc *mmc, u16 val) 2811 { 2812 mmc->dsr = val; 2813 return 0; 2814 } 2815 2816 /* CPU-specific MMC initializations */ 2817 __weak int cpu_mmc_init(bd_t *bis) 2818 { 2819 return -1; 2820 } 2821 2822 /* board-specific MMC initializations. */ 2823 __weak int board_mmc_init(bd_t *bis) 2824 { 2825 return -1; 2826 } 2827 2828 void mmc_set_preinit(struct mmc *mmc, int preinit) 2829 { 2830 mmc->preinit = preinit; 2831 } 2832 2833 #if CONFIG_IS_ENABLED(DM_MMC) 2834 static int mmc_probe(bd_t *bis) 2835 { 2836 int ret, i; 2837 struct uclass *uc; 2838 struct udevice *dev; 2839 2840 ret = uclass_get(UCLASS_MMC, &uc); 2841 if (ret) 2842 return ret; 2843 2844 /* 2845 * Try to add them in sequence order. Really with driver model we 2846 * should allow holes, but the current MMC list does not allow that. 2847 * So if we request 0, 1, 3 we will get 0, 1, 2. 
2848 */ 2849 for (i = 0; ; i++) { 2850 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev); 2851 if (ret == -ENODEV) 2852 break; 2853 } 2854 uclass_foreach_dev(dev, uc) { 2855 ret = device_probe(dev); 2856 if (ret) 2857 pr_err("%s - probe failed: %d\n", dev->name, ret); 2858 } 2859 2860 return 0; 2861 } 2862 #else 2863 static int mmc_probe(bd_t *bis) 2864 { 2865 if (board_mmc_init(bis) < 0) 2866 cpu_mmc_init(bis); 2867 2868 return 0; 2869 } 2870 #endif 2871 2872 int mmc_initialize(bd_t *bis) 2873 { 2874 static int initialized = 0; 2875 int ret; 2876 if (initialized) /* Avoid initializing mmc multiple times */ 2877 return 0; 2878 initialized = 1; 2879 2880 #if !CONFIG_IS_ENABLED(BLK) 2881 #if !CONFIG_IS_ENABLED(MMC_TINY) 2882 mmc_list_init(); 2883 #endif 2884 #endif 2885 ret = mmc_probe(bis); 2886 if (ret) 2887 return ret; 2888 2889 #ifndef CONFIG_SPL_BUILD 2890 print_mmc_devices(','); 2891 #endif 2892 2893 mmc_do_preinit(); 2894 return 0; 2895 } 2896 2897 #ifdef CONFIG_CMD_BKOPS_ENABLE 2898 int mmc_set_bkops_enable(struct mmc *mmc) 2899 { 2900 int err; 2901 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN); 2902 2903 err = mmc_send_ext_csd(mmc, ext_csd); 2904 if (err) { 2905 puts("Could not get ext_csd register values\n"); 2906 return err; 2907 } 2908 2909 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) { 2910 puts("Background operations not supported on device\n"); 2911 return -EMEDIUMTYPE; 2912 } 2913 2914 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) { 2915 puts("Background operations already enabled\n"); 2916 return 0; 2917 } 2918 2919 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1); 2920 if (err) { 2921 puts("Failed to enable manual background operations\n"); 2922 return err; 2923 } 2924 2925 puts("Enabled manual background operations\n"); 2926 2927 return 0; 2928 } 2929 #endif 2930