// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
void *aligned_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
#else
void *aligned_buffer;
#endif

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Wait max 100 ms */
	timeout = 100;
	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		timeout--;
		udelay(1000);
	}
}

static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;

	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3 - i) * 4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3 - i) * 4 - 1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}

static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
	int i;
	char *offs;

	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
		else
			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
	}
}

static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data,
			       unsigned int start_addr)
{
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;
#ifdef CONFIG_MMC_SDHCI_SDMA
	unsigned char ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
#endif

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
				transfer_done = true;
				continue;
			}
		}
#ifdef CONFIG_MMC_SDHCI_SDMA
		if (!transfer_done && (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			start_addr &= ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
			start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
			sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		}
#endif
		if (timeout-- > 0) {
			udelay(10);
		} else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

	return 0;
}
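/*
 * Note on the SDMA boundary handling above (illustrative, assuming the
 * driver's default 512 KiB SDMA buffer boundary): when the controller raises
 * SDHCI_INT_DMA_END it has paused at a boundary crossing. The code rounds the
 * current address down to that boundary and adds the boundary size before
 * rewriting SDHCI_DMA_ADDRESS, so a transfer that started at, say, 0x40070000
 * resumes at 0x40080000 and keeps going until SDHCI_INT_DATA_END is seen.
 */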
/*
 * No command will be sent by the driver if the card is busy, so the driver
 * must wait for the card to become ready.
 * Every time the card is still busy after the timeout, the (last) timeout
 * value is doubled, but only while it does not exceed the globally defined
 * maximum. Each function call reuses the last timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000

#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0, start_addr = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	/* Timeout unit - ms */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling
	 */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		if (data)
			mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set the transfer mode according to the data flag */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

#ifdef CONFIG_MMC_SDHCI_SDMA
		if (data->flags == MMC_DATA_READ)
			start_addr = (unsigned long)data->dest;
		else
			start_addr = (unsigned long)data->src;
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    (start_addr & 0x7) != 0x0) {
			is_aligned = 0;
			start_addr = (unsigned long)aligned_buffer;
			if (data->flags != MMC_DATA_READ)
				memcpy(aligned_buffer, data->src, trans_bytes);
		}

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
		/*
		 * Always use this bounce-buffer when
		 * CONFIG_FIXED_SDHCI_ALIGNED_BUFFER is defined
		 */
		is_aligned = 0;
		start_addr = (unsigned long)aligned_buffer;
		if (data->flags != MMC_DATA_READ)
			memcpy(aligned_buffer, data->src, trans_bytes);
#endif

		sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
		mode |= SDHCI_TRNS_DMA;
#endif
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
						    data->blocksize),
			     SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
#ifdef CONFIG_MMC_SDHCI_SDMA
	if (data) {
		trans_bytes = ALIGN(trans_bytes, CONFIG_SYS_CACHELINE_SIZE);
		flush_cache(start_addr, trans_bytes);
	}
#endif
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data, start_addr);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    !is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, aligned_buffer, trans_bytes);
		return 0;
	}

	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}

#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
		return 0;
	}

	return 0;
}
#endif
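/*
 * Divider selection in sdhci_set_clock() below, worked through on assumed
 * numbers: with a 200 MHz base clock and a requested 400 kHz on an SDHCI 3.00
 * controller without programmable clock mode, the loop stops at div = 500
 * since 200 MHz / 500 = 400 kHz; the value written to the divider field is
 * div >> 1 = 250, because the divided clock runs at base / (2 * N).
 */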
static int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
	       (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout waiting for cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops && host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	return 0;
}
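/*
 * Program the bus power. @power is the bit number of the selected voltage in
 * the MMC_VDD_* bit mask (sdhci_init() passes fls(voltages) - 1), or
 * (unsigned short)-1 to turn bus power off.
 */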
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		}
	}

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}

#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
#ifdef CONFIG_MMC_SDHCI_ASPEED
	u32 gen_addr, gen_ctrl;
#endif
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

#ifdef CONFIG_MMC_SDHCI_ASPEED
	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	gen_addr = (u32)host->ioaddr;
	gen_addr &= ~0x300;
	gen_ctrl = readl(gen_addr);
	if (mmc->bus_width == 8) {
		if ((u32)host->ioaddr & 0x100)
			writel(gen_ctrl | BIT(24), gen_addr);
		else
			writel(gen_ctrl | BIT(25), gen_addr);
	} else {
		writel(gen_ctrl & ~(BIT(24) | BIT(25)), gen_addr);
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
#else
	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
#endif
	if (mmc->clock > 26000000)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
		ctrl &= ~SDHCI_CTRL_HISPD;

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		host->ops->set_ios_post(host);

	return 0;
}
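/*
 * One-time controller bring-up: full software reset, allocation of the bounce
 * buffer used with SDHCI_QUIRK_32BIT_DMA_ADDR, bus power-up at the highest
 * supported voltage, and interrupt setup (status bits are enabled but their
 * signals stay masked, since the driver polls SDHCI_INT_STATUS).
 */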
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;

	sdhci_reset(host, SDHCI_RESET_ALL);

	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		aligned_buffer = memalign(8, 512 * 1024);
		if (!aligned_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}

	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}

#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}

const struct dm_mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning	= sdhci_execute_tuning,
#endif
};
#else
static const struct mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.init		= sdhci_init,
};
#endif
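/*
 * Fill in an mmc_config from the controller's capability registers: host
 * version, base/maximum clock, supported voltages, bus widths, high-speed and
 * UHS/HS200 modes. A non-zero f_max lower than the controller clock caps
 * cfg->f_max; a non-zero f_min overrides the derived minimum frequency.
 */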
int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
		    u32 f_max, u32 f_min)
{
	u32 caps, caps_1 = 0;

	caps = sdhci_readl(host, SDHCI_CAPABILITIES);

#ifdef CONFIG_MMC_SDHCI_SDMA
	if (!(caps & SDHCI_CAN_DO_SDMA)) {
		printf("%s: Your controller doesn't support SDMA!!\n",
		       __func__);
		return -EINVAL;
	}
#endif
	if (host->quirks & SDHCI_QUIRK_REG32_RW)
		host->version =
			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
	else
		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &sdhci_ops;
#endif

	/* Check whether the clock multiplier is supported or not */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
				SDHCI_CLOCK_MUL_SHIFT;
	}

	if (host->max_clk == 0) {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		else
			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		host->max_clk *= 1000000;
		if (host->clk_mul)
			host->max_clk *= host->clk_mul;
	}
	if (host->max_clk == 0) {
		printf("%s: Hardware doesn't specify base clock frequency\n",
		       __func__);
		return -EINVAL;
	}
	if (f_max && (f_max < host->max_clk))
		cfg->f_max = f_max;
	else
		cfg->f_max = host->max_clk;
	if (f_min)
		cfg->f_min = f_min;
	else {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
		else
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
	}
	cfg->voltages = 0;
	if (caps & SDHCI_CAN_VDD_330)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		cfg->voltages |= MMC_VDD_165_195;

	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
		cfg->voltages |= host->voltages;

	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;

	/* Since Host Controller Version 3.00 */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		if (!(caps & SDHCI_CAN_DO_8BIT))
			cfg->host_caps &= ~MMC_MODE_8BIT;
	}

	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
		cfg->host_caps &= ~MMC_MODE_HS;
		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
	}

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);

	if (!(cfg->voltages & MMC_VDD_165_195) ||
	    (host->quirks & SDHCI_QUIRK_NO_1_8_V))
		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			    SDHCI_SUPPORT_DDR50);

	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		      SDHCI_SUPPORT_DDR50))
		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

	if (caps_1 & SDHCI_SUPPORT_SDR104) {
		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		cfg->host_caps |= MMC_CAP(MMC_HS_200);
	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
		cfg->host_caps |= MMC_CAP(UHS_SDR50);
	}

	if (caps_1 & SDHCI_SUPPORT_DDR50)
		cfg->host_caps |= MMC_CAP(UHS_DDR50);

	if (host->host_caps)
		cfg->host_caps |= host->host_caps;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	return 0;
}

#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
	int ret;

	ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
	if (ret)
		return ret;

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL) {
		printf("%s: mmc create fail!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif
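/*
 * Illustrative usage (not part of this driver): in a non-DM build, a board
 * would typically describe its controller in a struct sdhci_host and register
 * it through add_sdhci(). All names and numbers below are hypothetical.
 *
 *	static struct sdhci_host board_host;
 *
 *	int board_sdhci_init(void)
 *	{
 *		board_host.name = "board-sdhci";
 *		board_host.ioaddr = (void *)0xf0600000;
 *		board_host.max_clk = 200000000;
 *		board_host.quirks = 0;
 *
 *		return add_sdhci(&board_host, 52000000, 400000);
 *	}
 */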