// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
#else
void *aligned_buffer;
#endif

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Wait max 100 ms */
	timeout = 100;
	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		timeout--;
		udelay(1000);
	}
}

static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;
	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}

static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
	int i;
	char *offs;
	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
		else
			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
	}
}

static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data,
				unsigned int start_addr)
{
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;
#ifdef CONFIG_MMC_SDHCI_SDMA
	unsigned char ctrl;
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
#endif

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
				transfer_done = true;
				continue;
			}
		}
#ifdef CONFIG_MMC_SDHCI_SDMA
		if (!transfer_done && (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			start_addr &= ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
			start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
			sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		}
#endif
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));
	return 0;
}

/*
 * No command will be sent by the driver while the card is busy, so the driver
 * must wait for the card to become ready.
 * Whenever the card is still busy after the current timeout expires, that
 * timeout value is doubled, but only up to the global maximum defined below.
 * Each subsequent call reuses the last timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000
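
/*
 * Example: with the defaults above, a card that stays busy sees the wait
 * grow from 100 ms to 200, 400, 800, 1600 and finally 3200 ms, after which
 * sdhci_send_command() below gives up with -ECOMM.
 */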

#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0, start_addr = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	/* Timeout unit - ms */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK)
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	if (cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK)
		mask = SDHCI_INT_DATA_AVAIL;

	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		if (data)
			mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK)
		flags |= SDHCI_CMD_DATA;

	/* Set transfer mode according to the data flags */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

#ifdef CONFIG_MMC_SDHCI_SDMA
		if (data->flags == MMC_DATA_READ)
			start_addr = (unsigned long)data->dest;
		else
			start_addr = (unsigned long)data->src;
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    (start_addr & 0x7) != 0x0) {
			is_aligned = 0;
			start_addr = (unsigned long)aligned_buffer;
			if (data->flags != MMC_DATA_READ)
				memcpy(aligned_buffer, data->src, trans_bytes);
		}

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
		/*
		 * Always use this bounce-buffer when
		 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
		 */
		is_aligned = 0;
		start_addr = (unsigned long)aligned_buffer;
		if (data->flags != MMC_DATA_READ)
			memcpy(aligned_buffer, data->src, trans_bytes);
#endif

		sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		mode |= SDHCI_TRNS_DMA;
#endif
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
				data->blocksize),
			     SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
#ifdef CONFIG_MMC_SDHCI_SDMA
	if (data) {
		trans_bytes = ALIGN(trans_bytes, CONFIG_SYS_CACHELINE_SIZE);
		flush_cache(start_addr, trans_bytes);
	}
#endif
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data, start_addr);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    !is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, aligned_buffer, trans_bytes);
		return 0;
	}

	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}

#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
		return 0;
	}
	return 0;
}
#endif

static int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
	       (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout waiting for cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	return 0;
}
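
/*
 * Worked example for the divisor selection in sdhci_set_clock() above: on a
 * v3.00 controller without a clock multiplier, max_clk = 200 MHz and a
 * requested clock of 50 MHz make the loop stop at div = 4; after div >>= 1
 * the value 2 is written to the divider field, and the controller divides
 * the base clock by 2 * 2 = 4, giving the requested 50 MHz.
 */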

static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		}
	}

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}
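
/*
 * Note on the "power" argument: sdhci_init() below passes
 * fls(mmc->cfg->voltages) - 1, i.e. the bit number of the highest supported
 * voltage. For example, a configuration advertising MMC_VDD_33_34 makes
 * 1 << power equal to MMC_VDD_33_34, so the switch above selects
 * SDHCI_POWER_330.
 */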

#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	if (mmc->clock > 26000000)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		host->ops->set_ios_post(host);

	return 0;
}

static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;

	sdhci_reset(host, SDHCI_RESET_ALL);

	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		aligned_buffer = memalign(8, 512*1024);
		if (!aligned_buffer) {
			printf("%s: Aligned buffer alloc failed!\n",
			       __func__);
			return -ENOMEM;
		}
	}

	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}

#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}

const struct dm_mmc_ops sdhci_ops = {
	.send_cmd = sdhci_send_command,
	.set_ios = sdhci_set_ios,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning = sdhci_execute_tuning,
#endif
};
#else
static const struct mmc_ops sdhci_ops = {
	.send_cmd = sdhci_send_command,
	.set_ios = sdhci_set_ios,
	.init = sdhci_init,
};
#endif

int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
		u32 f_max, u32 f_min)
{
	u32 caps, caps_1 = 0;

	caps = sdhci_readl(host, SDHCI_CAPABILITIES);

#ifdef CONFIG_MMC_SDHCI_SDMA
	if (!(caps & SDHCI_CAN_DO_SDMA)) {
		printf("%s: Your controller doesn't support SDMA!\n",
		       __func__);
		return -EINVAL;
	}
#endif
	if (host->quirks & SDHCI_QUIRK_REG32_RW)
		host->version =
			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
	else
		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &sdhci_ops;
#endif

	/* Check whether the clock multiplier is supported or not */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
				SDHCI_CLOCK_MUL_SHIFT;
	}

	if (host->max_clk == 0) {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		else
			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		host->max_clk *= 1000000;
		if (host->clk_mul)
			host->max_clk *= host->clk_mul;
	}
	if (host->max_clk == 0) {
		printf("%s: Hardware doesn't specify base clock frequency\n",
		       __func__);
		return -EINVAL;
	}
	if (f_max && (f_max < host->max_clk))
		cfg->f_max = f_max;
	else
		cfg->f_max = host->max_clk;
	if (f_min)
		cfg->f_min = f_min;
	else {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
		else
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
	}
	cfg->voltages = 0;
	if (caps & SDHCI_CAN_VDD_330)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		cfg->voltages |= MMC_VDD_165_195;

	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
		cfg->voltages |= host->voltages;

	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;

	/* Since Host Controller Version 3.0 */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		if (!(caps & SDHCI_CAN_DO_8BIT))
			cfg->host_caps &= ~MMC_MODE_8BIT;
	}

	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
		cfg->host_caps &= ~MMC_MODE_HS;
		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
	}

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (!(cfg->voltages & MMC_VDD_165_195) ||
	    (host->quirks & SDHCI_QUIRK_NO_1_8_V))
		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			    SDHCI_SUPPORT_DDR50);

	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		      SDHCI_SUPPORT_DDR50))
		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

	if (caps_1 & SDHCI_SUPPORT_SDR104) {
		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		cfg->host_caps |= MMC_CAP(MMC_HS_200);
	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
		cfg->host_caps |= MMC_CAP(UHS_SDR50);
	}

	if (caps_1 & SDHCI_SUPPORT_DDR50)
		cfg->host_caps |= MMC_CAP(UHS_DDR50);

	if (host->host_caps)
		cfg->host_caps |= host->host_caps;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	return 0;
}

#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
	int ret;

	ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
	if (ret)
		return ret;

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL) {
		printf("%s: mmc create failed!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif
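
/*
 * Usage sketch (hypothetical, not part of this driver): a non-DM platform
 * would typically describe its controller in a struct sdhci_host and hand it
 * to add_sdhci(), roughly like this:
 *
 *	static struct sdhci_host my_host = {
 *		.name	= "my-sdhci",
 *		.ioaddr	= (void *)MY_SDHCI_BASE,
 *	};
 *
 *	int my_sdhci_init(u32 input_clk)
 *	{
 *		my_host.max_clk = input_clk;
 *		return add_sdhci(&my_host, 0, 0);
 *	}
 *
 * MY_SDHCI_BASE, my_host and my_sdhci_init are placeholder names. Passing 0
 * for f_max and f_min lets sdhci_setup_cfg() derive the limits from max_clk
 * and the capability registers. DM-based drivers instead call
 * sdhci_setup_cfg(), sdhci_bind() and sdhci_probe() from their own bind and
 * probe methods.
 */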