// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
#else
void *aligned_buffer;
#endif

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Wait max 100 ms */
	timeout = 100;
	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		timeout--;
		udelay(1000);
	}
}

static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;

	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3 - i) * 4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
					SDHCI_RESPONSE + (3 - i) * 4 - 1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}

static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
	int i;
	char *offs;

	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
		else
			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
	}
}

static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data,
			       unsigned int start_addr)
{
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;
#ifdef CONFIG_MMC_SDHCI_SDMA
	unsigned char ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
#endif

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/*
				 * Keep looping until the SDHCI_INT_DATA_END
				 * interrupt is raised, even if all blocks
				 * have already been transferred.
				 */
				transfer_done = true;
				continue;
			}
		}
#ifdef CONFIG_MMC_SDHCI_SDMA
		if (!transfer_done && (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			start_addr &= ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
			start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
			sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		}
#endif
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

	return 0;
}

/*
 * No command will be sent by the driver while the card is busy, so the driver
 * must wait for the card-ready state first.
 * Every time the card is still busy after the current timeout, the (last)
 * timeout value is doubled, but only as long as it does not exceed the
 * globally defined maximum.
 * Each call to the function starts from the last timeout value.
 */
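/*
 * Example of the policy above, assuming the default values defined below:
 * the wait starts at SDHCI_CMD_DEFAULT_TIMEOUT (100 ms) and, on every expiry
 * while the card stays busy, doubles to 200, 400, ..., 3200 ms
 * (SDHCI_CMD_MAX_TIMEOUT); once doubling would exceed that maximum, the
 * command fails with -ECOMM.
 */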
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000

#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0, start_addr = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	/* Timeout unit - ms */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		if (data)
			mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set the transfer mode according to the data flags */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

#ifdef CONFIG_MMC_SDHCI_SDMA
		if (data->flags == MMC_DATA_READ)
			start_addr = (unsigned long)data->dest;
		else
			start_addr = (unsigned long)data->src;
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    (start_addr & 0x7) != 0x0) {
			is_aligned = 0;
			start_addr = (unsigned long)aligned_buffer;
			if (data->flags != MMC_DATA_READ)
				memcpy(aligned_buffer, data->src, trans_bytes);
		}

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
		/*
		 * Always use this bounce-buffer when
		 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined.
		 */
		is_aligned = 0;
		start_addr = (unsigned long)aligned_buffer;
		if (data->flags != MMC_DATA_READ)
			memcpy(aligned_buffer, data->src, trans_bytes);
#endif
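		/*
		 * For reads routed through the bounce buffer, the received
		 * data is copied back to data->dest at the end of this
		 * function (guarded by SDHCI_QUIRK_32BIT_DMA_ADDR).
		 */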

		sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		mode |= SDHCI_TRNS_DMA;
#endif
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
						    data->blocksize),
			     SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
#ifdef CONFIG_MMC_SDHCI_SDMA
	if (data) {
		trans_bytes = ALIGN(trans_bytes, CONFIG_SYS_CACHELINE_SIZE);
		flush_cache(start_addr, trans_bytes);
	}
#endif
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data, start_addr);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    !is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, aligned_buffer, trans_bytes);
		return 0;
	}

	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}

#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
		return 0;
	}
	return 0;
}
#endif

static int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
	       (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout to wait cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops && host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	return 0;
}

static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		}
	}

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}

#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	u32 gen_addr, gen_ctrl;
	u16 ctrl_2;
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

#ifdef CONFIG_MMC_SDHCI_ASPEED
	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	gen_addr = (u32)host->ioaddr;
	gen_addr &= ~0x300;
	gen_ctrl = readl(gen_addr);
	if (mmc->bus_width == 8) {
		if ((u32)host->ioaddr & 0x100)
			writel(gen_ctrl | BIT(24), gen_addr);
		else
			writel(gen_ctrl | BIT(25), gen_addr);
	} else {
		writel(gen_ctrl & ~(BIT(24) | BIT(25)), gen_addr);
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
#else
	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
#endif
	if (mmc->clock > 26000000)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL_2);
		ctrl_2 &= ~SDHCI_DRIVER_STRENGTH_MASK;
		ctrl_2 |= host->mmc->drv_type << SDHCI_DRIVER_STRENGTH_SHIFT;
		sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL_2);
	}

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		host->ops->set_ios_post(host);

	return 0;
}

static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;

	sdhci_reset(host, SDHCI_RESET_ALL);

	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		aligned_buffer = memalign(8, 512 * 1024);
		if (!aligned_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}

	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}

#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}

const struct dm_mmc_ops sdhci_ops = {
	.send_cmd = sdhci_send_command,
	.set_ios = sdhci_set_ios,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning = sdhci_execute_tuning,
#endif
};
#else
static const struct mmc_ops sdhci_ops = {
	.send_cmd = sdhci_send_command,
	.set_ios = sdhci_set_ios,
	.init = sdhci_init,
};
#endif

int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
		    u32 f_max, u32 f_min)
{
	u32 caps, caps_1 = 0;

	caps = sdhci_readl(host, SDHCI_CAPABILITIES);

#ifdef CONFIG_MMC_SDHCI_SDMA
	if (!(caps & SDHCI_CAN_DO_SDMA)) {
		printf("%s: Your controller doesn't support SDMA!!\n",
		       __func__);
		return -EINVAL;
	}
#endif
	if (host->quirks & SDHCI_QUIRK_REG32_RW)
		host->version =
			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
	else
		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &sdhci_ops;
#endif

	/* Check whether the clock multiplier is supported or not */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
				SDHCI_CLOCK_MUL_SHIFT;
	}
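	/*
	 * Derive the maximum clock if the caller did not set it: the
	 * capabilities register reports the base clock in MHz
	 * (SDHCI_CLOCK_V3_BASE_MASK for v3.00+ hosts, SDHCI_CLOCK_BASE_MASK
	 * otherwise); if a clock multiplier is reported, the programmable
	 * clock can run at base * clk_mul.
	 */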

	if (host->max_clk == 0) {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		else
			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		host->max_clk *= 1000000;
		if (host->clk_mul)
			host->max_clk *= host->clk_mul;
	}
	if (host->max_clk == 0) {
		printf("%s: Hardware doesn't specify base clock frequency\n",
		       __func__);
		return -EINVAL;
	}
	if (f_max && (f_max < host->max_clk))
		cfg->f_max = f_max;
	else
		cfg->f_max = host->max_clk;
	if (f_min)
		cfg->f_min = f_min;
	else {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
		else
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
	}
	cfg->voltages = 0;
	if (caps & SDHCI_CAN_VDD_330)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		cfg->voltages |= MMC_VDD_165_195;

	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
		cfg->voltages |= host->voltages;

	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;

	/* Since Host Controller Version 3.00 */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		if (!(caps & SDHCI_CAN_DO_8BIT))
			cfg->host_caps &= ~MMC_MODE_8BIT;
	}

	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
		cfg->host_caps &= ~MMC_MODE_HS;
		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
	}

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (!(cfg->voltages & MMC_VDD_165_195) ||
	    (host->quirks & SDHCI_QUIRK_NO_1_8_V))
		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			    SDHCI_SUPPORT_DDR50);

	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		      SDHCI_SUPPORT_DDR50))
		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

	if (caps_1 & SDHCI_SUPPORT_SDR104) {
		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
		/*
		 * SD3.0: SDR104 is supported, so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		cfg->host_caps |= MMC_CAP(MMC_HS_200);
	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
		cfg->host_caps |= MMC_CAP(UHS_SDR50);
	}

	if (caps_1 & SDHCI_SUPPORT_DDR50)
		cfg->host_caps |= MMC_CAP(UHS_DDR50);

	if (host->host_caps)
		cfg->host_caps |= host->host_caps;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	return 0;
}

#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
	int ret;

	ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
	if (ret)
		return ret;

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL) {
		printf("%s: mmc create fail!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif
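/*
 * Illustrative usage from a board driver in the non-DM, non-CONFIG_BLK case
 * (a sketch only; the host name, register base and quirks below are
 * hypothetical, not taken from any real board):
 *
 *	static struct sdhci_host example_host;
 *
 *	example_host.name = "example-sdhci";
 *	example_host.ioaddr = (void *)0xf0000000;
 *	example_host.quirks = SDHCI_QUIRK_WAIT_SEND_CMD;
 *	example_host.max_clk = 0;	// base clock read from capabilities
 *	add_sdhci(&example_host, 52000000, 400000);
 */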