/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <bouncebuf.h>
#include <common.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <asm-generic/errno.h>

#define PAGE_SIZE 4096

static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

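/*
 * Build the internal DMA controller (IDMAC) descriptor chain for a
 * transfer. Each descriptor covers at most 8 blocks of the bounce
 * buffer; the final descriptor carries the "last descriptor" flag.
 */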
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	u32 timeout = 240000;
	u32 mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks / 4;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			if (data->flags == MMC_DATA_READ) {
				if ((dwmci_readl(host, DWMCI_RINTSTS) &
				     DWMCI_INTMSK_RXDR)) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						DWMCI_FIFO_MASK;
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					dwmci_writel(host, DWMCI_RINTSTS,
						     DWMCI_INTMSK_RXDR);
				}
			} else {
				if ((dwmci_readl(host, DWMCI_RINTSTS) &
				     DWMCI_INTMSK_TXDR)) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					dwmci_writel(host, DWMCI_RINTSTS,
						     DWMCI_INTMSK_TXDR);
				}
			}
			size = size > len ? (size - len) : 0;
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = TIMEOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

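/*
 * Send a command and, optionally, transfer data. Data moves either by
 * PIO through the FIFO (host->fifo_mode) or via the IDMAC with a
 * cache-aligned bounce buffer.
 */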
#ifdef CONFIG_DM_MMC_OPS
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return TIMEOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void*)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void*)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return TIMEOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return TIMEOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only dma mode need it */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

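/*
 * Set the card clock: derive the divider from the source clock
 * (host->get_mmc_clk() or host->bus_hz) and latch it with a clock
 * update command (DWMCI_CMD_UPD_CLK).
 */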
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is source clock value.
	 * host->bus_hz should be set by user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

#ifdef CONFIG_DM_MMC_OPS
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static void dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);
#ifdef CONFIG_DM_MMC_OPS
	return 0;
#endif
}

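/*
 * Reset the controller, power up the card, drop the clock to the
 * enumeration frequency and program the FIFO watermarks.
 */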
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

#ifdef CONFIG_DM_MMC_OPS
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
};
#endif

void dwmci_setup_cfg(struct mmc_config *cfg, const char *name, int buswidth,
		     uint caps, u32 max_clk, u32 min_clk)
{
	cfg->name = name;
#ifndef CONFIG_DM_MMC_OPS
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = caps;

	if (buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host->name, host->buswidth, host->caps,
			max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif