1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Synopsys DesignWare Multimedia Card Interface driver 4 * (Based on NXP driver for lpc 31xx) 5 * 6 * Copyright (C) 2009 NXP Semiconductors 7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 8 */ 9 10 #include <linux/blkdev.h> 11 #include <linux/clk.h> 12 #include <linux/debugfs.h> 13 #include <linux/device.h> 14 #include <linux/dma-mapping.h> 15 #include <linux/err.h> 16 #include <linux/init.h> 17 #include <linux/interrupt.h> 18 #include <linux/iopoll.h> 19 #include <linux/ioport.h> 20 #include <linux/ktime.h> 21 #include <linux/module.h> 22 #include <linux/platform_device.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/prandom.h> 25 #include <linux/seq_file.h> 26 #include <linux/slab.h> 27 #include <linux/stat.h> 28 #include <linux/delay.h> 29 #include <linux/irq.h> 30 #include <linux/mmc/card.h> 31 #include <linux/mmc/host.h> 32 #include <linux/mmc/mmc.h> 33 #include <linux/mmc/sd.h> 34 #include <linux/mmc/sdio.h> 35 #include <linux/bitops.h> 36 #include <linux/regulator/consumer.h> 37 #include <linux/of.h> 38 #include <linux/of_gpio.h> 39 #include <linux/mmc/slot-gpio.h> 40 41 #include "dw_mmc.h" 42 43 /* Common flag combinations */ 44 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 45 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 46 SDMMC_INT_EBE | SDMMC_INT_HLE) 47 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 48 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) 49 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 50 DW_MCI_CMD_ERROR_FLAGS) 51 #define DW_MCI_SEND_STATUS 1 52 #define DW_MCI_RECV_STATUS 2 53 #define DW_MCI_DMA_THRESHOLD 16 54 55 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 56 #define DW_MCI_FREQ_MIN 100000 /* unit: HZ */ 57 58 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 59 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 60 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 61 SDMMC_IDMAC_INT_TI) 62 63 #define DESC_RING_BUF_SZ PAGE_SIZE 64 65 struct idmac_desc_64addr { 66 u32 des0; /* Control Descriptor */ 67 #define IDMAC_OWN_CLR64(x) \ 68 !((x) & cpu_to_le32(IDMAC_DES0_OWN)) 69 70 u32 des1; /* Reserved */ 71 72 u32 des2; /*Buffer sizes */ 73 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ 74 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ 75 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) 76 77 u32 des3; /* Reserved */ 78 79 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/ 80 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ 81 82 u32 des6; /* Lower 32-bits of Next Descriptor Address */ 83 u32 des7; /* Upper 32-bits of Next Descriptor Address */ 84 }; 85 86 struct idmac_desc { 87 __le32 des0; /* Control Descriptor */ 88 #define IDMAC_DES0_DIC BIT(1) 89 #define IDMAC_DES0_LD BIT(2) 90 #define IDMAC_DES0_FD BIT(3) 91 #define IDMAC_DES0_CH BIT(4) 92 #define IDMAC_DES0_ER BIT(5) 93 #define IDMAC_DES0_CES BIT(30) 94 #define IDMAC_DES0_OWN BIT(31) 95 96 __le32 des1; /* Buffer sizes */ 97 #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 98 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) 99 100 __le32 des2; /* buffer 1 physical address */ 101 102 __le32 des3; /* buffer 2 physical address */ 103 }; 104 105 /* Each descriptor can transfer up to 4KB of data in chained mode */ 106 #define DW_MCI_DESC_DATA_LENGTH 0x1000 107 108 #if defined(CONFIG_DEBUG_FS) 109 static int dw_mci_req_show(struct seq_file *s, void *v) 110 { 111 struct dw_mci_slot *slot = s->private; 112 struct mmc_request *mrq; 113 struct mmc_command *cmd; 114 struct mmc_command *stop; 115 struct mmc_data *data; 116 117 /* Make sure we get a consistent snapshot */ 118 spin_lock_bh(&slot->host->lock); 119 mrq = slot->mrq; 120 121 if (mrq) { 122 cmd = mrq->cmd; 123 data = mrq->data; 124 stop = mrq->stop; 125 126 if (cmd) 127 seq_printf(s, 128 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 129 cmd->opcode, cmd->arg, cmd->flags, 130 cmd->resp[0], cmd->resp[1], cmd->resp[2], 131 cmd->resp[2], cmd->error); 132 if (data) 133 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 134 data->bytes_xfered, data->blocks, 135 data->blksz, data->flags, data->error); 136 if (stop) 137 seq_printf(s, 138 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 139 stop->opcode, stop->arg, stop->flags, 140 stop->resp[0], stop->resp[1], stop->resp[2], 141 stop->resp[2], stop->error); 142 } 143 144 spin_unlock_bh(&slot->host->lock); 145 146 return 0; 147 } 148 DEFINE_SHOW_ATTRIBUTE(dw_mci_req); 149 150 static int dw_mci_regs_show(struct seq_file *s, void *v) 151 { 152 struct dw_mci *host = s->private; 153 154 pm_runtime_get_sync(host->dev); 155 156 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 157 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 158 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 159 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 160 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 161 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 162 163 pm_runtime_put_autosuspend(host->dev); 164 165 return 0; 166 } 167 DEFINE_SHOW_ATTRIBUTE(dw_mci_regs); 168 169 static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 170 { 171 struct mmc_host *mmc = slot->mmc; 172 struct dw_mci *host = slot->host; 173 struct dentry *root; 174 175 root = mmc->debugfs_root; 176 if (!root) 177 return; 178 179 debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops); 180 debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops); 181 debugfs_create_u32("state", S_IRUSR, root, &host->state); 182 debugfs_create_xul("pending_events", S_IRUSR, root, 183 &host->pending_events); 184 debugfs_create_xul("completed_events", S_IRUSR, root, 185 &host->completed_events); 186 #ifdef CONFIG_FAULT_INJECTION 187 fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc); 188 #endif 189 } 190 #endif /* defined(CONFIG_DEBUG_FS) */ 191 192 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 193 { 194 u32 ctrl; 195 196 ctrl = mci_readl(host, CTRL); 197 ctrl |= reset; 198 mci_writel(host, CTRL, ctrl); 199 200 /* wait till resets clear */ 201 if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl, 202 !(ctrl & reset), 203 1, 500 * USEC_PER_MSEC)) { 204 dev_err(host->dev, 205 "Timeout resetting block (ctrl reset %#x)\n", 206 ctrl & reset); 207 return false; 208 } 209 210 return true; 211 } 212 213 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 214 { 215 u32 status; 216 217 /* 218 * Databook says that before issuing a new data transfer command 219 * we need to check to see if the card is busy. Data transfer commands 220 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 221 * 222 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 223 * expected. 224 */ 225 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 226 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 227 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 228 status, 229 !(status & SDMMC_STATUS_BUSY), 230 10, 500 * USEC_PER_MSEC)) 231 dev_err(host->dev, "Busy; trying anyway\n"); 232 } 233 } 234 235 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 236 { 237 struct dw_mci *host = slot->host; 238 unsigned int cmd_status = 0; 239 240 mci_writel(host, CMDARG, arg); 241 wmb(); /* drain writebuffer */ 242 dw_mci_wait_while_busy(host, cmd); 243 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 244 245 if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status, 246 !(cmd_status & SDMMC_CMD_START), 247 1, 500 * USEC_PER_MSEC)) 248 dev_err(&slot->mmc->class_dev, 249 "Timeout sending command (cmd %#x arg %#x status %#x)\n", 250 cmd, arg, cmd_status); 251 } 252 253 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 254 { 255 struct dw_mci_slot *slot = mmc_priv(mmc); 256 struct dw_mci *host = slot->host; 257 u32 cmdr; 258 259 cmd->error = -EINPROGRESS; 260 cmdr = cmd->opcode; 261 262 if (cmd->opcode == MMC_STOP_TRANSMISSION || 263 cmd->opcode == MMC_GO_IDLE_STATE || 264 cmd->opcode == MMC_GO_INACTIVE_STATE || 265 (cmd->opcode == SD_IO_RW_DIRECT && 266 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 267 cmdr |= SDMMC_CMD_STOP; 268 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 269 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 270 271 if (cmd->opcode == SD_SWITCH_VOLTAGE) { 272 u32 clk_en_a; 273 274 /* Special bit makes CMD11 not die */ 275 cmdr |= SDMMC_CMD_VOLT_SWITCH; 276 277 /* Change state to continue to handle CMD11 weirdness */ 278 WARN_ON(slot->host->state != STATE_SENDING_CMD); 279 slot->host->state = STATE_SENDING_CMD11; 280 281 /* 282 * We need to disable low power mode (automatic clock stop) 283 * while doing voltage switch so we don't confuse the card, 284 * since stopping the clock is a specific part of the UHS 285 * voltage change dance. 286 * 287 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be 288 * unconditionally turned back on in dw_mci_setup_bus() if it's 289 * ever called with a non-zero clock. That shouldn't happen 290 * until the voltage change is all done. 291 */ 292 clk_en_a = mci_readl(host, CLKENA); 293 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); 294 mci_writel(host, CLKENA, clk_en_a); 295 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 296 SDMMC_CMD_PRV_DAT_WAIT, 0); 297 } 298 299 if (cmd->flags & MMC_RSP_PRESENT) { 300 /* We expect a response, so set this bit */ 301 cmdr |= SDMMC_CMD_RESP_EXP; 302 if (cmd->flags & MMC_RSP_136) 303 cmdr |= SDMMC_CMD_RESP_LONG; 304 } 305 306 if (cmd->flags & MMC_RSP_CRC) 307 cmdr |= SDMMC_CMD_RESP_CRC; 308 309 if (cmd->data) { 310 cmdr |= SDMMC_CMD_DAT_EXP; 311 if (cmd->data->flags & MMC_DATA_WRITE) 312 cmdr |= SDMMC_CMD_DAT_WR; 313 } 314 315 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) 316 cmdr |= SDMMC_CMD_USE_HOLD_REG; 317 318 return cmdr; 319 } 320 321 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) 322 { 323 struct mmc_command *stop; 324 u32 cmdr; 325 326 if (!cmd->data) 327 return 0; 328 329 stop = &host->stop_abort; 330 cmdr = cmd->opcode; 331 memset(stop, 0, sizeof(struct mmc_command)); 332 333 if (cmdr == MMC_READ_SINGLE_BLOCK || 334 cmdr == MMC_READ_MULTIPLE_BLOCK || 335 cmdr == MMC_WRITE_BLOCK || 336 cmdr == MMC_WRITE_MULTIPLE_BLOCK || 337 cmdr == MMC_SEND_TUNING_BLOCK || 338 cmdr == MMC_SEND_TUNING_BLOCK_HS200 || 339 cmdr == MMC_GEN_CMD) { 340 stop->opcode = MMC_STOP_TRANSMISSION; 341 stop->arg = 0; 342 stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 343 } else if (cmdr == SD_IO_RW_EXTENDED) { 344 stop->opcode = SD_IO_RW_DIRECT; 345 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | 346 ((cmd->arg >> 28) & 0x7); 347 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; 348 } else { 349 return 0; 350 } 351 352 cmdr = stop->opcode | SDMMC_CMD_STOP | 353 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; 354 355 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags)) 356 cmdr |= SDMMC_CMD_USE_HOLD_REG; 357 358 return cmdr; 359 } 360 361 static inline void dw_mci_set_cto(struct dw_mci *host) 362 { 363 unsigned int cto_clks; 364 unsigned int cto_div; 365 unsigned int cto_ms; 366 unsigned long irqflags; 367 368 cto_clks = mci_readl(host, TMOUT) & 0xff; 369 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 370 if (cto_div == 0) 371 cto_div = 1; 372 373 cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, 374 host->bus_hz); 375 376 /* add a bit spare time */ 377 cto_ms += 10; 378 379 /* 380 * The durations we're working with are fairly short so we have to be 381 * extra careful about synchronization here. Specifically in hardware a 382 * command timeout is _at most_ 5.1 ms, so that means we expect an 383 * interrupt (either command done or timeout) to come rather quickly 384 * after the mci_writel. ...but just in case we have a long interrupt 385 * latency let's add a bit of paranoia. 386 * 387 * In general we'll assume that at least an interrupt will be asserted 388 * in hardware by the time the cto_timer runs. ...and if it hasn't 389 * been asserted in hardware by that time then we'll assume it'll never 390 * come. 391 */ 392 spin_lock_irqsave(&host->irq_lock, irqflags); 393 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 394 mod_timer(&host->cto_timer, 395 jiffies + msecs_to_jiffies(cto_ms) + 1); 396 spin_unlock_irqrestore(&host->irq_lock, irqflags); 397 } 398 399 static void dw_mci_start_command(struct dw_mci *host, 400 struct mmc_command *cmd, u32 cmd_flags) 401 { 402 host->cmd = cmd; 403 dev_vdbg(host->dev, 404 "start command: ARGR=0x%08x CMDR=0x%08x\n", 405 cmd->arg, cmd_flags); 406 407 mci_writel(host, CMDARG, cmd->arg); 408 wmb(); /* drain writebuffer */ 409 dw_mci_wait_while_busy(host, cmd_flags); 410 411 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 412 413 /* response expected command only */ 414 if (cmd_flags & SDMMC_CMD_RESP_EXP) 415 dw_mci_set_cto(host); 416 } 417 418 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 419 { 420 struct mmc_command *stop = &host->stop_abort; 421 422 dw_mci_start_command(host, stop, host->stop_cmdr); 423 } 424 425 /* DMA interface functions */ 426 static void dw_mci_stop_dma(struct dw_mci *host) 427 { 428 if (host->using_dma) { 429 host->dma_ops->stop(host); 430 host->dma_ops->cleanup(host); 431 } 432 433 /* Data transfer was stopped by the interrupt handler */ 434 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 435 } 436 437 static void dw_mci_dma_cleanup(struct dw_mci *host) 438 { 439 struct mmc_data *data = host->data; 440 441 if (data && data->host_cookie == COOKIE_MAPPED) { 442 dma_unmap_sg(host->dev, 443 data->sg, 444 data->sg_len, 445 mmc_get_dma_dir(data)); 446 data->host_cookie = COOKIE_UNMAPPED; 447 } 448 } 449 450 static void dw_mci_idmac_reset(struct dw_mci *host) 451 { 452 u32 bmod = mci_readl(host, BMOD); 453 /* Software reset of DMA */ 454 bmod |= SDMMC_IDMAC_SWRESET; 455 mci_writel(host, BMOD, bmod); 456 } 457 458 static void dw_mci_idmac_stop_dma(struct dw_mci *host) 459 { 460 u32 temp; 461 462 /* Disable and reset the IDMAC interface */ 463 temp = mci_readl(host, CTRL); 464 temp &= ~SDMMC_CTRL_USE_IDMAC; 465 temp |= SDMMC_CTRL_DMA_RESET; 466 mci_writel(host, CTRL, temp); 467 468 /* Stop the IDMAC running */ 469 temp = mci_readl(host, BMOD); 470 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 471 temp |= SDMMC_IDMAC_SWRESET; 472 mci_writel(host, BMOD, temp); 473 } 474 475 static void dw_mci_dmac_complete_dma(void *arg) 476 { 477 struct dw_mci *host = arg; 478 struct mmc_data *data = host->data; 479 480 dev_vdbg(host->dev, "DMA complete\n"); 481 482 if ((host->use_dma == TRANS_MODE_EDMAC) && 483 data && (data->flags & MMC_DATA_READ)) 484 /* Invalidate cache after read */ 485 dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc), 486 data->sg, 487 data->sg_len, 488 DMA_FROM_DEVICE); 489 490 host->dma_ops->cleanup(host); 491 492 /* 493 * If the card was removed, data will be NULL. No point in trying to 494 * send the stop command or waiting for NBUSY in this case. 495 */ 496 if (data) { 497 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 498 tasklet_schedule(&host->tasklet); 499 } 500 } 501 502 static int dw_mci_idmac_init(struct dw_mci *host) 503 { 504 int i; 505 506 if (host->dma_64bit_address == 1) { 507 struct idmac_desc_64addr *p; 508 /* Number of descriptors in the ring buffer */ 509 host->ring_size = 510 DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); 511 512 /* Forward link the descriptor list */ 513 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 514 i++, p++) { 515 p->des6 = (host->sg_dma + 516 (sizeof(struct idmac_desc_64addr) * 517 (i + 1))) & 0xffffffff; 518 519 p->des7 = (u64)(host->sg_dma + 520 (sizeof(struct idmac_desc_64addr) * 521 (i + 1))) >> 32; 522 /* Initialize reserved and buffer size fields to "0" */ 523 p->des0 = 0; 524 p->des1 = 0; 525 p->des2 = 0; 526 p->des3 = 0; 527 } 528 529 /* Set the last descriptor as the end-of-ring descriptor */ 530 p->des6 = host->sg_dma & 0xffffffff; 531 p->des7 = (u64)host->sg_dma >> 32; 532 p->des0 = IDMAC_DES0_ER; 533 534 } else { 535 struct idmac_desc *p; 536 /* Number of descriptors in the ring buffer */ 537 host->ring_size = 538 DESC_RING_BUF_SZ / sizeof(struct idmac_desc); 539 540 /* Forward link the descriptor list */ 541 for (i = 0, p = host->sg_cpu; 542 i < host->ring_size - 1; 543 i++, p++) { 544 p->des3 = cpu_to_le32(host->sg_dma + 545 (sizeof(struct idmac_desc) * (i + 1))); 546 p->des0 = 0; 547 p->des1 = 0; 548 } 549 550 /* Set the last descriptor as the end-of-ring descriptor */ 551 p->des3 = cpu_to_le32(host->sg_dma); 552 p->des0 = cpu_to_le32(IDMAC_DES0_ER); 553 } 554 555 dw_mci_idmac_reset(host); 556 557 if (host->dma_64bit_address == 1) { 558 /* Mask out interrupts - get Tx & Rx complete only */ 559 mci_writel(host, IDSTS64, IDMAC_INT_CLR); 560 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | 561 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 562 563 /* Set the descriptor base address */ 564 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); 565 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); 566 567 } else { 568 /* Mask out interrupts - get Tx & Rx complete only */ 569 mci_writel(host, IDSTS, IDMAC_INT_CLR); 570 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | 571 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 572 573 /* Set the descriptor base address */ 574 mci_writel(host, DBADDR, host->sg_dma); 575 } 576 577 return 0; 578 } 579 580 static inline int dw_mci_prepare_desc64(struct dw_mci *host, 581 struct mmc_data *data, 582 unsigned int sg_len) 583 { 584 unsigned int desc_len; 585 struct idmac_desc_64addr *desc_first, *desc_last, *desc; 586 u32 val; 587 int i; 588 589 desc_first = desc_last = desc = host->sg_cpu; 590 591 for (i = 0; i < sg_len; i++) { 592 unsigned int length = sg_dma_len(&data->sg[i]); 593 594 u64 mem_addr = sg_dma_address(&data->sg[i]); 595 596 for ( ; length ; desc++) { 597 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 598 length : DW_MCI_DESC_DATA_LENGTH; 599 600 length -= desc_len; 601 602 /* 603 * Wait for the former clear OWN bit operation 604 * of IDMAC to make sure that this descriptor 605 * isn't still owned by IDMAC as IDMAC's write 606 * ops and CPU's read ops are asynchronous. 607 */ 608 if (readl_poll_timeout_atomic(&desc->des0, val, 609 !(val & IDMAC_DES0_OWN), 610 10, 100 * USEC_PER_MSEC)) 611 goto err_own_bit; 612 613 /* 614 * Set the OWN bit and disable interrupts 615 * for this descriptor 616 */ 617 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 618 IDMAC_DES0_CH; 619 620 /* Buffer length */ 621 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); 622 623 /* Physical address to DMA to/from */ 624 desc->des4 = mem_addr & 0xffffffff; 625 desc->des5 = mem_addr >> 32; 626 627 /* Update physical address for the next desc */ 628 mem_addr += desc_len; 629 630 /* Save pointer to the last descriptor */ 631 desc_last = desc; 632 } 633 } 634 635 /* Set first descriptor */ 636 desc_first->des0 |= IDMAC_DES0_FD; 637 638 /* Set last descriptor */ 639 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 640 desc_last->des0 |= IDMAC_DES0_LD; 641 642 return 0; 643 err_own_bit: 644 /* restore the descriptor chain as it's polluted */ 645 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 646 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 647 dw_mci_idmac_init(host); 648 return -EINVAL; 649 } 650 651 652 static inline int dw_mci_prepare_desc32(struct dw_mci *host, 653 struct mmc_data *data, 654 unsigned int sg_len) 655 { 656 unsigned int desc_len; 657 struct idmac_desc *desc_first, *desc_last, *desc; 658 u32 val; 659 int i; 660 661 desc_first = desc_last = desc = host->sg_cpu; 662 663 for (i = 0; i < sg_len; i++) { 664 unsigned int length = sg_dma_len(&data->sg[i]); 665 666 u32 mem_addr = sg_dma_address(&data->sg[i]); 667 668 for ( ; length ; desc++) { 669 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 670 length : DW_MCI_DESC_DATA_LENGTH; 671 672 length -= desc_len; 673 674 /* 675 * Wait for the former clear OWN bit operation 676 * of IDMAC to make sure that this descriptor 677 * isn't still owned by IDMAC as IDMAC's write 678 * ops and CPU's read ops are asynchronous. 679 */ 680 if (readl_poll_timeout_atomic(&desc->des0, val, 681 IDMAC_OWN_CLR64(val), 682 10, 683 100 * USEC_PER_MSEC)) 684 goto err_own_bit; 685 686 /* 687 * Set the OWN bit and disable interrupts 688 * for this descriptor 689 */ 690 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 691 IDMAC_DES0_DIC | 692 IDMAC_DES0_CH); 693 694 /* Buffer length */ 695 IDMAC_SET_BUFFER1_SIZE(desc, desc_len); 696 697 /* Physical address to DMA to/from */ 698 desc->des2 = cpu_to_le32(mem_addr); 699 700 /* Update physical address for the next desc */ 701 mem_addr += desc_len; 702 703 /* Save pointer to the last descriptor */ 704 desc_last = desc; 705 } 706 } 707 708 /* Set first descriptor */ 709 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); 710 711 /* Set last descriptor */ 712 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | 713 IDMAC_DES0_DIC)); 714 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); 715 716 return 0; 717 err_own_bit: 718 /* restore the descriptor chain as it's polluted */ 719 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 720 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 721 dw_mci_idmac_init(host); 722 return -EINVAL; 723 } 724 725 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 726 { 727 u32 temp; 728 int ret; 729 730 if (host->dma_64bit_address == 1) 731 ret = dw_mci_prepare_desc64(host, host->data, sg_len); 732 else 733 ret = dw_mci_prepare_desc32(host, host->data, sg_len); 734 735 if (ret) 736 goto out; 737 738 /* drain writebuffer */ 739 wmb(); 740 741 /* Make sure to reset DMA in case we did PIO before this */ 742 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); 743 dw_mci_idmac_reset(host); 744 745 /* Select IDMAC interface */ 746 temp = mci_readl(host, CTRL); 747 temp |= SDMMC_CTRL_USE_IDMAC; 748 mci_writel(host, CTRL, temp); 749 750 /* drain writebuffer */ 751 wmb(); 752 753 /* Enable the IDMAC */ 754 temp = mci_readl(host, BMOD); 755 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 756 mci_writel(host, BMOD, temp); 757 758 /* Start it running */ 759 mci_writel(host, PLDMND, 1); 760 761 out: 762 return ret; 763 } 764 765 static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 766 .init = dw_mci_idmac_init, 767 .start = dw_mci_idmac_start_dma, 768 .stop = dw_mci_idmac_stop_dma, 769 .complete = dw_mci_dmac_complete_dma, 770 .cleanup = dw_mci_dma_cleanup, 771 }; 772 773 static void dw_mci_edmac_stop_dma(struct dw_mci *host) 774 { 775 dmaengine_terminate_async(host->dms->ch); 776 } 777 778 static int dw_mci_edmac_start_dma(struct dw_mci *host, 779 unsigned int sg_len) 780 { 781 struct dma_slave_config cfg; 782 struct dma_async_tx_descriptor *desc = NULL; 783 struct scatterlist *sgl = host->data->sg; 784 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 785 u32 sg_elems = host->data->sg_len; 786 u32 fifoth_val; 787 u32 fifo_offset = host->fifo_reg - host->regs; 788 int ret = 0; 789 790 /* Set external dma config: burst size, burst width */ 791 memset(&cfg, 0, sizeof(cfg)); 792 cfg.dst_addr = host->phy_regs + fifo_offset; 793 cfg.src_addr = cfg.dst_addr; 794 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 795 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 796 797 /* Match burst msize with external dma config */ 798 fifoth_val = mci_readl(host, FIFOTH); 799 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; 800 cfg.src_maxburst = cfg.dst_maxburst; 801 802 if (host->data->flags & MMC_DATA_WRITE) 803 cfg.direction = DMA_MEM_TO_DEV; 804 else 805 cfg.direction = DMA_DEV_TO_MEM; 806 807 ret = dmaengine_slave_config(host->dms->ch, &cfg); 808 if (ret) { 809 dev_err(host->dev, "Failed to config edmac.\n"); 810 return -EBUSY; 811 } 812 813 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, 814 sg_len, cfg.direction, 815 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 816 if (!desc) { 817 dev_err(host->dev, "Can't prepare slave sg.\n"); 818 return -EBUSY; 819 } 820 821 /* Set dw_mci_dmac_complete_dma as callback */ 822 desc->callback = dw_mci_dmac_complete_dma; 823 desc->callback_param = (void *)host; 824 dmaengine_submit(desc); 825 826 /* Flush cache before write */ 827 if (host->data->flags & MMC_DATA_WRITE) 828 dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl, 829 sg_elems, DMA_TO_DEVICE); 830 831 dma_async_issue_pending(host->dms->ch); 832 833 return 0; 834 } 835 836 static int dw_mci_edmac_init(struct dw_mci *host) 837 { 838 /* Request external dma channel */ 839 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); 840 if (!host->dms) 841 return -ENOMEM; 842 843 host->dms->ch = dma_request_chan(host->dev, "rx-tx"); 844 if (IS_ERR(host->dms->ch)) { 845 int ret = PTR_ERR(host->dms->ch); 846 847 dev_err(host->dev, "Failed to get external DMA channel.\n"); 848 kfree(host->dms); 849 host->dms = NULL; 850 return ret; 851 } 852 853 return 0; 854 } 855 856 static void dw_mci_edmac_exit(struct dw_mci *host) 857 { 858 if (host->dms) { 859 if (host->dms->ch) { 860 dma_release_channel(host->dms->ch); 861 host->dms->ch = NULL; 862 } 863 kfree(host->dms); 864 host->dms = NULL; 865 } 866 } 867 868 static const struct dw_mci_dma_ops dw_mci_edmac_ops = { 869 .init = dw_mci_edmac_init, 870 .exit = dw_mci_edmac_exit, 871 .start = dw_mci_edmac_start_dma, 872 .stop = dw_mci_edmac_stop_dma, 873 .complete = dw_mci_dmac_complete_dma, 874 .cleanup = dw_mci_dma_cleanup, 875 }; 876 877 static int dw_mci_pre_dma_transfer(struct dw_mci *host, 878 struct mmc_data *data, 879 int cookie) 880 { 881 struct scatterlist *sg; 882 unsigned int i, sg_len; 883 884 if (data->host_cookie == COOKIE_PRE_MAPPED) 885 return data->sg_len; 886 887 /* 888 * We don't do DMA on "complex" transfers, i.e. with 889 * non-word-aligned buffers or lengths. Also, we don't bother 890 * with all the DMA setup overhead for short transfers. 891 */ 892 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 893 return -EINVAL; 894 895 if (data->blksz & 3) 896 return -EINVAL; 897 898 for_each_sg(data->sg, sg, data->sg_len, i) { 899 if (sg->offset & 3 || sg->length & 3) 900 return -EINVAL; 901 } 902 903 sg_len = dma_map_sg(host->dev, 904 data->sg, 905 data->sg_len, 906 mmc_get_dma_dir(data)); 907 if (sg_len == 0) 908 return -EINVAL; 909 910 data->host_cookie = cookie; 911 912 return sg_len; 913 } 914 915 static void dw_mci_pre_req(struct mmc_host *mmc, 916 struct mmc_request *mrq) 917 { 918 struct dw_mci_slot *slot = mmc_priv(mmc); 919 struct mmc_data *data = mrq->data; 920 921 if (!slot->host->use_dma || !data) 922 return; 923 924 /* This data might be unmapped at this time */ 925 data->host_cookie = COOKIE_UNMAPPED; 926 927 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 928 COOKIE_PRE_MAPPED) < 0) 929 data->host_cookie = COOKIE_UNMAPPED; 930 } 931 932 static void dw_mci_post_req(struct mmc_host *mmc, 933 struct mmc_request *mrq, 934 int err) 935 { 936 struct dw_mci_slot *slot = mmc_priv(mmc); 937 struct mmc_data *data = mrq->data; 938 939 if (!slot->host->use_dma || !data) 940 return; 941 942 if (data->host_cookie != COOKIE_UNMAPPED) 943 dma_unmap_sg(slot->host->dev, 944 data->sg, 945 data->sg_len, 946 mmc_get_dma_dir(data)); 947 data->host_cookie = COOKIE_UNMAPPED; 948 } 949 950 static int dw_mci_get_cd(struct mmc_host *mmc) 951 { 952 int present; 953 struct dw_mci_slot *slot = mmc_priv(mmc); 954 struct dw_mci *host = slot->host; 955 int gpio_cd = mmc_gpio_get_cd(mmc); 956 957 /* Use platform get_cd function, else try onboard card detect */ 958 if (((mmc->caps & MMC_CAP_NEEDS_POLL) 959 || !mmc_card_is_removable(mmc))) { 960 present = 1; 961 962 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 963 if (mmc->caps & MMC_CAP_NEEDS_POLL) { 964 dev_info(&mmc->class_dev, 965 "card is polling.\n"); 966 } else { 967 dev_info(&mmc->class_dev, 968 "card is non-removable.\n"); 969 } 970 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 971 } 972 973 return present; 974 } else if (gpio_cd >= 0) 975 present = gpio_cd; 976 else 977 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 978 == 0 ? 1 : 0; 979 980 spin_lock_bh(&host->lock); 981 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 982 dev_dbg(&mmc->class_dev, "card is present\n"); 983 else if (!present && 984 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 985 dev_dbg(&mmc->class_dev, "card is not present\n"); 986 spin_unlock_bh(&host->lock); 987 988 return present; 989 } 990 991 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 992 { 993 unsigned int blksz = data->blksz; 994 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 995 u32 fifo_width = 1 << host->data_shift; 996 u32 blksz_depth = blksz / fifo_width, fifoth_val; 997 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 998 int idx = ARRAY_SIZE(mszs) - 1; 999 1000 /* pio should ship this scenario */ 1001 if (!host->use_dma) 1002 return; 1003 1004 tx_wmark = (host->fifo_depth) / 2; 1005 tx_wmark_invers = host->fifo_depth - tx_wmark; 1006 1007 /* 1008 * MSIZE is '1', 1009 * if blksz is not a multiple of the FIFO width 1010 */ 1011 if (blksz % fifo_width) 1012 goto done; 1013 1014 do { 1015 if (!((blksz_depth % mszs[idx]) || 1016 (tx_wmark_invers % mszs[idx]))) { 1017 msize = idx; 1018 rx_wmark = mszs[idx] - 1; 1019 break; 1020 } 1021 } while (--idx > 0); 1022 /* 1023 * If idx is '0', it won't be tried 1024 * Thus, initial values are uesed 1025 */ 1026 done: 1027 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 1028 mci_writel(host, FIFOTH, fifoth_val); 1029 } 1030 1031 static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) 1032 { 1033 unsigned int blksz = data->blksz; 1034 u32 blksz_depth, fifo_depth; 1035 u16 thld_size; 1036 u8 enable; 1037 1038 /* 1039 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is 1040 * in the FIFO region, so we really shouldn't access it). 1041 */ 1042 if (host->verid < DW_MMC_240A || 1043 (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) 1044 return; 1045 1046 /* 1047 * Card write Threshold is introduced since 2.80a 1048 * It's used when HS400 mode is enabled. 1049 */ 1050 if (data->flags & MMC_DATA_WRITE && 1051 host->timing != MMC_TIMING_MMC_HS400) 1052 goto disable; 1053 1054 if (data->flags & MMC_DATA_WRITE) 1055 enable = SDMMC_CARD_WR_THR_EN; 1056 else 1057 enable = SDMMC_CARD_RD_THR_EN; 1058 1059 if (host->timing != MMC_TIMING_MMC_HS200 && 1060 host->timing != MMC_TIMING_UHS_SDR104 && 1061 host->timing != MMC_TIMING_MMC_HS400) 1062 goto disable; 1063 1064 blksz_depth = blksz / (1 << host->data_shift); 1065 fifo_depth = host->fifo_depth; 1066 1067 if (blksz_depth > fifo_depth) 1068 goto disable; 1069 1070 /* 1071 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' 1072 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz 1073 * Currently just choose blksz. 1074 */ 1075 thld_size = blksz; 1076 mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); 1077 return; 1078 1079 disable: 1080 mci_writel(host, CDTHRCTL, 0); 1081 } 1082 1083 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 1084 { 1085 unsigned long irqflags; 1086 int sg_len; 1087 u32 temp; 1088 1089 host->using_dma = 0; 1090 1091 /* If we don't have a channel, we can't do DMA */ 1092 if (!host->use_dma) 1093 return -ENODEV; 1094 1095 sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1096 if (sg_len < 0) { 1097 host->dma_ops->stop(host); 1098 return sg_len; 1099 } 1100 1101 host->using_dma = 1; 1102 1103 if (host->use_dma == TRANS_MODE_IDMAC) 1104 dev_vdbg(host->dev, 1105 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 1106 (unsigned long)host->sg_cpu, 1107 (unsigned long)host->sg_dma, 1108 sg_len); 1109 1110 /* 1111 * Decide the MSIZE and RX/TX Watermark. 1112 * If current block size is same with previous size, 1113 * no need to update fifoth. 1114 */ 1115 if (host->prev_blksz != data->blksz) 1116 dw_mci_adjust_fifoth(host, data); 1117 1118 /* Enable the DMA interface */ 1119 temp = mci_readl(host, CTRL); 1120 temp |= SDMMC_CTRL_DMA_ENABLE; 1121 mci_writel(host, CTRL, temp); 1122 1123 /* Disable RX/TX IRQs, let DMA handle it */ 1124 spin_lock_irqsave(&host->irq_lock, irqflags); 1125 temp = mci_readl(host, INTMASK); 1126 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 1127 mci_writel(host, INTMASK, temp); 1128 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1129 1130 if (host->dma_ops->start(host, sg_len)) { 1131 host->dma_ops->stop(host); 1132 /* We can't do DMA, try PIO for this one */ 1133 dev_dbg(host->dev, 1134 "%s: fall back to PIO mode for current transfer\n", 1135 __func__); 1136 return -ENODEV; 1137 } 1138 1139 return 0; 1140 } 1141 1142 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 1143 { 1144 unsigned long irqflags; 1145 int flags = SG_MITER_ATOMIC; 1146 u32 temp; 1147 1148 data->error = -EINPROGRESS; 1149 1150 WARN_ON(host->data); 1151 host->sg = NULL; 1152 host->data = data; 1153 1154 if (data->flags & MMC_DATA_READ) 1155 host->dir_status = DW_MCI_RECV_STATUS; 1156 else 1157 host->dir_status = DW_MCI_SEND_STATUS; 1158 1159 dw_mci_ctrl_thld(host, data); 1160 1161 if (dw_mci_submit_data_dma(host, data)) { 1162 if (host->data->flags & MMC_DATA_READ) 1163 flags |= SG_MITER_TO_SG; 1164 else 1165 flags |= SG_MITER_FROM_SG; 1166 1167 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1168 host->sg = data->sg; 1169 host->part_buf_start = 0; 1170 host->part_buf_count = 0; 1171 1172 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 1173 1174 spin_lock_irqsave(&host->irq_lock, irqflags); 1175 temp = mci_readl(host, INTMASK); 1176 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 1177 mci_writel(host, INTMASK, temp); 1178 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1179 1180 temp = mci_readl(host, CTRL); 1181 temp &= ~SDMMC_CTRL_DMA_ENABLE; 1182 mci_writel(host, CTRL, temp); 1183 1184 /* 1185 * Use the initial fifoth_val for PIO mode. If wm_algined 1186 * is set, we set watermark same as data size. 1187 * If next issued data may be transfered by DMA mode, 1188 * prev_blksz should be invalidated. 1189 */ 1190 if (host->wm_aligned) 1191 dw_mci_adjust_fifoth(host, data); 1192 else 1193 mci_writel(host, FIFOTH, host->fifoth_val); 1194 host->prev_blksz = 0; 1195 } else { 1196 /* 1197 * Keep the current block size. 1198 * It will be used to decide whether to update 1199 * fifoth register next time. 1200 */ 1201 host->prev_blksz = data->blksz; 1202 } 1203 } 1204 1205 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) 1206 { 1207 struct dw_mci *host = slot->host; 1208 unsigned int clock = slot->clock; 1209 u32 div; 1210 u32 clk_en_a; 1211 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT; 1212 1213 /* We must continue to set bit 28 in CMD until the change is complete */ 1214 if (host->state == STATE_WAITING_CMD11_DONE) 1215 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; 1216 1217 slot->mmc->actual_clock = 0; 1218 1219 if (!clock) { 1220 mci_writel(host, CLKENA, 0); 1221 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1222 } else if (clock != host->current_speed || force_clkinit) { 1223 div = host->bus_hz / clock; 1224 if (host->bus_hz % clock && host->bus_hz > clock) 1225 /* 1226 * move the + 1 after the divide to prevent 1227 * over-clocking the card. 1228 */ 1229 div += 1; 1230 1231 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1232 1233 if ((clock != slot->__clk_old && 1234 !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || 1235 force_clkinit) { 1236 /* Silent the verbose log if calling from PM context */ 1237 if (!force_clkinit) 1238 dev_info(&slot->mmc->class_dev, 1239 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1240 slot->id, host->bus_hz, clock, 1241 div ? ((host->bus_hz / div) >> 1) : 1242 host->bus_hz, div); 1243 1244 /* 1245 * If card is polling, display the message only 1246 * one time at boot time. 1247 */ 1248 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && 1249 slot->mmc->f_min == clock) 1250 set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags); 1251 } 1252 1253 /* disable clock */ 1254 mci_writel(host, CLKENA, 0); 1255 mci_writel(host, CLKSRC, 0); 1256 1257 /* inform CIU */ 1258 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1259 1260 /* set clock to desired speed */ 1261 mci_writel(host, CLKDIV, div); 1262 1263 /* inform CIU */ 1264 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1265 1266 /* enable clock; only low power if no SDIO */ 1267 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 1268 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) 1269 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 1270 mci_writel(host, CLKENA, clk_en_a); 1271 1272 /* inform CIU */ 1273 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1274 1275 /* keep the last clock value that was requested from core */ 1276 slot->__clk_old = clock; 1277 slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) : 1278 host->bus_hz; 1279 } 1280 1281 host->current_speed = clock; 1282 1283 /* Set the current slot bus width */ 1284 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 1285 } 1286 1287 static void __dw_mci_start_request(struct dw_mci *host, 1288 struct dw_mci_slot *slot, 1289 struct mmc_command *cmd) 1290 { 1291 struct mmc_request *mrq; 1292 struct mmc_data *data; 1293 u32 cmdflags; 1294 1295 mrq = slot->mrq; 1296 1297 host->mrq = mrq; 1298 1299 host->pending_events = 0; 1300 host->completed_events = 0; 1301 host->cmd_status = 0; 1302 host->data_status = 0; 1303 host->dir_status = 0; 1304 1305 data = cmd->data; 1306 if (data) { 1307 mci_writel(host, TMOUT, 0xFFFFFFFF); 1308 mci_writel(host, BYTCNT, data->blksz*data->blocks); 1309 mci_writel(host, BLKSIZ, data->blksz); 1310 } 1311 1312 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 1313 1314 /* this is the first command, send the initialization clock */ 1315 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 1316 cmdflags |= SDMMC_CMD_INIT; 1317 1318 if (data) { 1319 dw_mci_submit_data(host, data); 1320 wmb(); /* drain writebuffer */ 1321 } 1322 1323 dw_mci_start_command(host, cmd, cmdflags); 1324 1325 if (cmd->opcode == SD_SWITCH_VOLTAGE) { 1326 unsigned long irqflags; 1327 1328 /* 1329 * Databook says to fail after 2ms w/ no response, but evidence 1330 * shows that sometimes the cmd11 interrupt takes over 130ms. 1331 * We'll set to 500ms, plus an extra jiffy just in case jiffies 1332 * is just about to roll over. 1333 * 1334 * We do this whole thing under spinlock and only if the 1335 * command hasn't already completed (indicating the the irq 1336 * already ran so we don't want the timeout). 1337 */ 1338 spin_lock_irqsave(&host->irq_lock, irqflags); 1339 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 1340 mod_timer(&host->cmd11_timer, 1341 jiffies + msecs_to_jiffies(500) + 1); 1342 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1343 } 1344 1345 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); 1346 } 1347 1348 static void dw_mci_start_request(struct dw_mci *host, 1349 struct dw_mci_slot *slot) 1350 { 1351 struct mmc_request *mrq = slot->mrq; 1352 struct mmc_command *cmd; 1353 1354 cmd = mrq->sbc ? mrq->sbc : mrq->cmd; 1355 __dw_mci_start_request(host, slot, cmd); 1356 } 1357 1358 /* must be called with host->lock held */ 1359 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 1360 struct mmc_request *mrq) 1361 { 1362 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 1363 host->state); 1364 1365 slot->mrq = mrq; 1366 1367 if (host->state == STATE_WAITING_CMD11_DONE) { 1368 dev_warn(&slot->mmc->class_dev, 1369 "Voltage change didn't complete\n"); 1370 /* 1371 * this case isn't expected to happen, so we can 1372 * either crash here or just try to continue on 1373 * in the closest possible state 1374 */ 1375 host->state = STATE_IDLE; 1376 } 1377 1378 if (host->state == STATE_IDLE) { 1379 host->state = STATE_SENDING_CMD; 1380 dw_mci_start_request(host, slot); 1381 } else { 1382 list_add_tail(&slot->queue_node, &host->queue); 1383 } 1384 } 1385 1386 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1387 { 1388 struct dw_mci_slot *slot = mmc_priv(mmc); 1389 struct dw_mci *host = slot->host; 1390 1391 WARN_ON(slot->mrq); 1392 1393 /* 1394 * The check for card presence and queueing of the request must be 1395 * atomic, otherwise the card could be removed in between and the 1396 * request wouldn't fail until another card was inserted. 1397 */ 1398 1399 if (!dw_mci_get_cd(mmc)) { 1400 mrq->cmd->error = -ENOMEDIUM; 1401 mmc_request_done(mmc, mrq); 1402 return; 1403 } 1404 1405 spin_lock_bh(&host->lock); 1406 1407 dw_mci_queue_request(host, slot, mrq); 1408 1409 spin_unlock_bh(&host->lock); 1410 } 1411 1412 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1413 { 1414 struct dw_mci_slot *slot = mmc_priv(mmc); 1415 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 1416 u32 regs; 1417 int ret; 1418 1419 switch (ios->bus_width) { 1420 case MMC_BUS_WIDTH_4: 1421 slot->ctype = SDMMC_CTYPE_4BIT; 1422 break; 1423 case MMC_BUS_WIDTH_8: 1424 slot->ctype = SDMMC_CTYPE_8BIT; 1425 break; 1426 default: 1427 /* set default 1 bit mode */ 1428 slot->ctype = SDMMC_CTYPE_1BIT; 1429 } 1430 1431 regs = mci_readl(slot->host, UHS_REG); 1432 1433 /* DDR mode set */ 1434 if (ios->timing == MMC_TIMING_MMC_DDR52 || 1435 ios->timing == MMC_TIMING_UHS_DDR50 || 1436 ios->timing == MMC_TIMING_MMC_HS400) 1437 regs |= ((0x1 << slot->id) << 16); 1438 else 1439 regs &= ~((0x1 << slot->id) << 16); 1440 1441 mci_writel(slot->host, UHS_REG, regs); 1442 slot->host->timing = ios->timing; 1443 1444 /* 1445 * Use mirror of ios->clock to prevent race with mmc 1446 * core ios update when finding the minimum. 1447 */ 1448 slot->clock = ios->clock; 1449 1450 if (drv_data && drv_data->set_ios) 1451 drv_data->set_ios(slot->host, ios); 1452 1453 switch (ios->power_mode) { 1454 case MMC_POWER_UP: 1455 if (!IS_ERR(mmc->supply.vmmc)) { 1456 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 1457 ios->vdd); 1458 if (ret) { 1459 dev_err(slot->host->dev, 1460 "failed to enable vmmc regulator\n"); 1461 /*return, if failed turn on vmmc*/ 1462 return; 1463 } 1464 } 1465 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 1466 regs = mci_readl(slot->host, PWREN); 1467 regs |= (1 << slot->id); 1468 mci_writel(slot->host, PWREN, regs); 1469 break; 1470 case MMC_POWER_ON: 1471 if (!slot->host->vqmmc_enabled) { 1472 if (!IS_ERR(mmc->supply.vqmmc)) { 1473 ret = regulator_enable(mmc->supply.vqmmc); 1474 if (ret < 0) 1475 dev_err(slot->host->dev, 1476 "failed to enable vqmmc\n"); 1477 else 1478 slot->host->vqmmc_enabled = true; 1479 1480 } else { 1481 /* Keep track so we don't reset again */ 1482 slot->host->vqmmc_enabled = true; 1483 } 1484 1485 /* Reset our state machine after powering on */ 1486 dw_mci_ctrl_reset(slot->host, 1487 SDMMC_CTRL_ALL_RESET_FLAGS); 1488 } 1489 1490 /* Adjust clock / bus width after power is up */ 1491 dw_mci_setup_bus(slot, false); 1492 1493 break; 1494 case MMC_POWER_OFF: 1495 /* Turn clock off before power goes down */ 1496 dw_mci_setup_bus(slot, false); 1497 1498 if (!IS_ERR(mmc->supply.vmmc)) 1499 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1500 1501 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) 1502 regulator_disable(mmc->supply.vqmmc); 1503 slot->host->vqmmc_enabled = false; 1504 1505 regs = mci_readl(slot->host, PWREN); 1506 regs &= ~(1 << slot->id); 1507 mci_writel(slot->host, PWREN, regs); 1508 break; 1509 default: 1510 break; 1511 } 1512 1513 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) 1514 slot->host->state = STATE_IDLE; 1515 } 1516 1517 static int dw_mci_card_busy(struct mmc_host *mmc) 1518 { 1519 struct dw_mci_slot *slot = mmc_priv(mmc); 1520 u32 status; 1521 1522 /* 1523 * Check the busy bit which is low when DAT[3:0] 1524 * (the data lines) are 0000 1525 */ 1526 status = mci_readl(slot->host, STATUS); 1527 1528 return !!(status & SDMMC_STATUS_BUSY); 1529 } 1530 1531 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) 1532 { 1533 struct dw_mci_slot *slot = mmc_priv(mmc); 1534 struct dw_mci *host = slot->host; 1535 const struct dw_mci_drv_data *drv_data = host->drv_data; 1536 u32 uhs; 1537 u32 v18 = SDMMC_UHS_18V << slot->id; 1538 int ret; 1539 1540 if (drv_data && drv_data->switch_voltage) 1541 return drv_data->switch_voltage(mmc, ios); 1542 1543 /* 1544 * Program the voltage. Note that some instances of dw_mmc may use 1545 * the UHS_REG for this. For other instances (like exynos) the UHS_REG 1546 * does no harm but you need to set the regulator directly. Try both. 1547 */ 1548 uhs = mci_readl(host, UHS_REG); 1549 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1550 uhs &= ~v18; 1551 else 1552 uhs |= v18; 1553 1554 if (!IS_ERR(mmc->supply.vqmmc)) { 1555 ret = mmc_regulator_set_vqmmc(mmc, ios); 1556 if (ret < 0) { 1557 dev_dbg(&mmc->class_dev, 1558 "Regulator set error %d - %s V\n", 1559 ret, uhs & v18 ? "1.8" : "3.3"); 1560 return ret; 1561 } 1562 } 1563 mci_writel(host, UHS_REG, uhs); 1564 1565 return 0; 1566 } 1567 1568 static int dw_mci_get_ro(struct mmc_host *mmc) 1569 { 1570 int read_only; 1571 struct dw_mci_slot *slot = mmc_priv(mmc); 1572 int gpio_ro = mmc_gpio_get_ro(mmc); 1573 1574 /* Use platform get_ro function, else try on board write protect */ 1575 if (gpio_ro >= 0) 1576 read_only = gpio_ro; 1577 else 1578 read_only = 1579 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 1580 1581 dev_dbg(&mmc->class_dev, "card is %s\n", 1582 read_only ? "read-only" : "read-write"); 1583 1584 return read_only; 1585 } 1586 1587 static void dw_mci_hw_reset(struct mmc_host *mmc) 1588 { 1589 struct dw_mci_slot *slot = mmc_priv(mmc); 1590 struct dw_mci *host = slot->host; 1591 int reset; 1592 1593 if (host->use_dma == TRANS_MODE_IDMAC) 1594 dw_mci_idmac_reset(host); 1595 1596 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET | 1597 SDMMC_CTRL_FIFO_RESET)) 1598 return; 1599 1600 /* 1601 * According to eMMC spec, card reset procedure: 1602 * tRstW >= 1us: RST_n pulse width 1603 * tRSCA >= 200us: RST_n to Command time 1604 * tRSTH >= 1us: RST_n high period 1605 */ 1606 reset = mci_readl(host, RST_N); 1607 reset &= ~(SDMMC_RST_HWACTIVE << slot->id); 1608 mci_writel(host, RST_N, reset); 1609 usleep_range(1, 2); 1610 reset |= SDMMC_RST_HWACTIVE << slot->id; 1611 mci_writel(host, RST_N, reset); 1612 usleep_range(200, 300); 1613 } 1614 1615 static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare) 1616 { 1617 struct dw_mci *host = slot->host; 1618 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; 1619 u32 clk_en_a_old; 1620 u32 clk_en_a; 1621 1622 /* 1623 * Low power mode will stop the card clock when idle. According to the 1624 * description of the CLKENA register we should disable low power mode 1625 * for SDIO cards if we need SDIO interrupts to work. 1626 */ 1627 1628 clk_en_a_old = mci_readl(host, CLKENA); 1629 if (prepare) { 1630 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1631 clk_en_a = clk_en_a_old & ~clken_low_pwr; 1632 } else { 1633 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1634 clk_en_a = clk_en_a_old | clken_low_pwr; 1635 } 1636 1637 if (clk_en_a != clk_en_a_old) { 1638 mci_writel(host, CLKENA, clk_en_a); 1639 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 1640 0); 1641 } 1642 } 1643 1644 static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb) 1645 { 1646 struct dw_mci *host = slot->host; 1647 unsigned long irqflags; 1648 u32 int_mask; 1649 1650 spin_lock_irqsave(&host->irq_lock, irqflags); 1651 1652 /* Enable/disable Slot Specific SDIO interrupt */ 1653 int_mask = mci_readl(host, INTMASK); 1654 if (enb) 1655 int_mask |= SDMMC_INT_SDIO(slot->sdio_id); 1656 else 1657 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id); 1658 mci_writel(host, INTMASK, int_mask); 1659 1660 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1661 } 1662 1663 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 1664 { 1665 struct dw_mci_slot *slot = mmc_priv(mmc); 1666 struct dw_mci *host = slot->host; 1667 1668 dw_mci_prepare_sdio_irq(slot, enb); 1669 __dw_mci_enable_sdio_irq(slot, enb); 1670 1671 /* Avoid runtime suspending the device when SDIO IRQ is enabled */ 1672 if (enb) 1673 pm_runtime_get_noresume(host->dev); 1674 else 1675 pm_runtime_put_noidle(host->dev); 1676 } 1677 1678 static void dw_mci_ack_sdio_irq(struct mmc_host *mmc) 1679 { 1680 struct dw_mci_slot *slot = mmc_priv(mmc); 1681 1682 __dw_mci_enable_sdio_irq(slot, 1); 1683 } 1684 1685 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1686 { 1687 struct dw_mci_slot *slot = mmc_priv(mmc); 1688 struct dw_mci *host = slot->host; 1689 const struct dw_mci_drv_data *drv_data = host->drv_data; 1690 int err = -EINVAL; 1691 1692 if (drv_data && drv_data->execute_tuning) 1693 err = drv_data->execute_tuning(slot, opcode); 1694 return err; 1695 } 1696 1697 static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, 1698 struct mmc_ios *ios) 1699 { 1700 struct dw_mci_slot *slot = mmc_priv(mmc); 1701 struct dw_mci *host = slot->host; 1702 const struct dw_mci_drv_data *drv_data = host->drv_data; 1703 1704 if (drv_data && drv_data->prepare_hs400_tuning) 1705 return drv_data->prepare_hs400_tuning(host, ios); 1706 1707 return 0; 1708 } 1709 1710 static bool dw_mci_reset(struct dw_mci *host) 1711 { 1712 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; 1713 bool ret = false; 1714 u32 status = 0; 1715 1716 /* 1717 * Resetting generates a block interrupt, hence setting 1718 * the scatter-gather pointer to NULL. 1719 */ 1720 if (host->sg) { 1721 sg_miter_stop(&host->sg_miter); 1722 host->sg = NULL; 1723 } 1724 1725 if (host->use_dma) 1726 flags |= SDMMC_CTRL_DMA_RESET; 1727 1728 if (dw_mci_ctrl_reset(host, flags)) { 1729 /* 1730 * In all cases we clear the RAWINTS 1731 * register to clear any interrupts. 1732 */ 1733 mci_writel(host, RINTSTS, 0xFFFFFFFF); 1734 1735 if (!host->use_dma) { 1736 ret = true; 1737 goto ciu_out; 1738 } 1739 1740 /* Wait for dma_req to be cleared */ 1741 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 1742 status, 1743 !(status & SDMMC_STATUS_DMA_REQ), 1744 1, 500 * USEC_PER_MSEC)) { 1745 dev_err(host->dev, 1746 "%s: Timeout waiting for dma_req to be cleared\n", 1747 __func__); 1748 goto ciu_out; 1749 } 1750 1751 /* when using DMA next we reset the fifo again */ 1752 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) 1753 goto ciu_out; 1754 } else { 1755 /* if the controller reset bit did clear, then set clock regs */ 1756 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { 1757 dev_err(host->dev, 1758 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", 1759 __func__); 1760 goto ciu_out; 1761 } 1762 } 1763 1764 if (host->use_dma == TRANS_MODE_IDMAC) 1765 /* It is also required that we reinit idmac */ 1766 dw_mci_idmac_init(host); 1767 1768 ret = true; 1769 1770 ciu_out: 1771 /* After a CTRL reset we need to have CIU set clock registers */ 1772 mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0); 1773 1774 return ret; 1775 } 1776 1777 static const struct mmc_host_ops dw_mci_ops = { 1778 .request = dw_mci_request, 1779 .pre_req = dw_mci_pre_req, 1780 .post_req = dw_mci_post_req, 1781 .set_ios = dw_mci_set_ios, 1782 .get_ro = dw_mci_get_ro, 1783 .get_cd = dw_mci_get_cd, 1784 .hw_reset = dw_mci_hw_reset, 1785 .enable_sdio_irq = dw_mci_enable_sdio_irq, 1786 .ack_sdio_irq = dw_mci_ack_sdio_irq, 1787 .execute_tuning = dw_mci_execute_tuning, 1788 .card_busy = dw_mci_card_busy, 1789 .start_signal_voltage_switch = dw_mci_switch_voltage, 1790 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, 1791 }; 1792 1793 #ifdef CONFIG_FAULT_INJECTION 1794 static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t) 1795 { 1796 struct dw_mci *host = container_of(t, struct dw_mci, fault_timer); 1797 unsigned long flags; 1798 1799 spin_lock_irqsave(&host->irq_lock, flags); 1800 1801 /* 1802 * Only inject an error if we haven't already got an error or data over 1803 * interrupt. 1804 */ 1805 if (!host->data_status) { 1806 host->data_status = SDMMC_INT_DCRC; 1807 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1808 tasklet_schedule(&host->tasklet); 1809 } 1810 1811 spin_unlock_irqrestore(&host->irq_lock, flags); 1812 1813 return HRTIMER_NORESTART; 1814 } 1815 1816 static void dw_mci_start_fault_timer(struct dw_mci *host) 1817 { 1818 struct mmc_data *data = host->data; 1819 1820 if (!data || data->blocks <= 1) 1821 return; 1822 1823 if (!should_fail(&host->fail_data_crc, 1)) 1824 return; 1825 1826 /* 1827 * Try to inject the error at random points during the data transfer. 1828 */ 1829 hrtimer_start(&host->fault_timer, 1830 ms_to_ktime(prandom_u32() % 25), 1831 HRTIMER_MODE_REL); 1832 } 1833 1834 static void dw_mci_stop_fault_timer(struct dw_mci *host) 1835 { 1836 hrtimer_cancel(&host->fault_timer); 1837 } 1838 1839 static void dw_mci_init_fault(struct dw_mci *host) 1840 { 1841 host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER; 1842 1843 hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1844 host->fault_timer.function = dw_mci_fault_timer; 1845 } 1846 #else 1847 static void dw_mci_init_fault(struct dw_mci *host) 1848 { 1849 } 1850 1851 static void dw_mci_start_fault_timer(struct dw_mci *host) 1852 { 1853 } 1854 1855 static void dw_mci_stop_fault_timer(struct dw_mci *host) 1856 { 1857 } 1858 #endif 1859 1860 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 1861 __releases(&host->lock) 1862 __acquires(&host->lock) 1863 { 1864 struct dw_mci_slot *slot; 1865 struct mmc_host *prev_mmc = host->slot->mmc; 1866 1867 WARN_ON(host->cmd || host->data); 1868 1869 host->slot->mrq = NULL; 1870 host->mrq = NULL; 1871 if (!list_empty(&host->queue)) { 1872 slot = list_entry(host->queue.next, 1873 struct dw_mci_slot, queue_node); 1874 list_del(&slot->queue_node); 1875 dev_vdbg(host->dev, "list not empty: %s is next\n", 1876 mmc_hostname(slot->mmc)); 1877 host->state = STATE_SENDING_CMD; 1878 dw_mci_start_request(host, slot); 1879 } else { 1880 dev_vdbg(host->dev, "list empty\n"); 1881 1882 if (host->state == STATE_SENDING_CMD11) 1883 host->state = STATE_WAITING_CMD11_DONE; 1884 else 1885 host->state = STATE_IDLE; 1886 } 1887 1888 spin_unlock(&host->lock); 1889 mmc_request_done(prev_mmc, mrq); 1890 spin_lock(&host->lock); 1891 } 1892 1893 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 1894 { 1895 u32 status = host->cmd_status; 1896 1897 host->cmd_status = 0; 1898 1899 /* Read the response from the card (up to 16 bytes) */ 1900 if (cmd->flags & MMC_RSP_PRESENT) { 1901 if (cmd->flags & MMC_RSP_136) { 1902 cmd->resp[3] = mci_readl(host, RESP0); 1903 cmd->resp[2] = mci_readl(host, RESP1); 1904 cmd->resp[1] = mci_readl(host, RESP2); 1905 cmd->resp[0] = mci_readl(host, RESP3); 1906 } else { 1907 cmd->resp[0] = mci_readl(host, RESP0); 1908 cmd->resp[1] = 0; 1909 cmd->resp[2] = 0; 1910 cmd->resp[3] = 0; 1911 } 1912 } 1913 1914 if (status & SDMMC_INT_RTO) 1915 cmd->error = -ETIMEDOUT; 1916 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 1917 cmd->error = -EILSEQ; 1918 else if (status & SDMMC_INT_RESP_ERR) 1919 cmd->error = -EIO; 1920 else 1921 cmd->error = 0; 1922 1923 return cmd->error; 1924 } 1925 1926 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) 1927 { 1928 u32 status = host->data_status; 1929 1930 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1931 if (status & SDMMC_INT_DRTO) { 1932 data->error = -ETIMEDOUT; 1933 } else if (status & SDMMC_INT_DCRC) { 1934 data->error = -EILSEQ; 1935 } else if (status & SDMMC_INT_EBE) { 1936 if (host->dir_status == 1937 DW_MCI_SEND_STATUS) { 1938 /* 1939 * No data CRC status was returned. 1940 * The number of bytes transferred 1941 * will be exaggerated in PIO mode. 1942 */ 1943 data->bytes_xfered = 0; 1944 data->error = -ETIMEDOUT; 1945 } else if (host->dir_status == 1946 DW_MCI_RECV_STATUS) { 1947 data->error = -EILSEQ; 1948 } 1949 } else { 1950 /* SDMMC_INT_SBE is included */ 1951 data->error = -EILSEQ; 1952 } 1953 1954 dev_dbg(host->dev, "data error, status 0x%08x\n", status); 1955 1956 /* 1957 * After an error, there may be data lingering 1958 * in the FIFO 1959 */ 1960 dw_mci_reset(host); 1961 } else { 1962 data->bytes_xfered = data->blocks * data->blksz; 1963 data->error = 0; 1964 } 1965 1966 return data->error; 1967 } 1968 1969 static void dw_mci_set_drto(struct dw_mci *host) 1970 { 1971 unsigned int drto_clks; 1972 unsigned int drto_div; 1973 unsigned int drto_ms; 1974 unsigned long irqflags; 1975 1976 drto_clks = mci_readl(host, TMOUT) >> 8; 1977 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 1978 if (drto_div == 0) 1979 drto_div = 1; 1980 1981 drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, 1982 host->bus_hz); 1983 1984 /* add a bit spare time */ 1985 drto_ms += 10; 1986 1987 spin_lock_irqsave(&host->irq_lock, irqflags); 1988 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) 1989 mod_timer(&host->dto_timer, 1990 jiffies + msecs_to_jiffies(drto_ms)); 1991 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1992 } 1993 1994 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) 1995 { 1996 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 1997 return false; 1998 1999 /* 2000 * Really be certain that the timer has stopped. This is a bit of 2001 * paranoia and could only really happen if we had really bad 2002 * interrupt latency and the interrupt routine and timeout were 2003 * running concurrently so that the del_timer() in the interrupt 2004 * handler couldn't run. 2005 */ 2006 WARN_ON(del_timer_sync(&host->cto_timer)); 2007 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2008 2009 return true; 2010 } 2011 2012 static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) 2013 { 2014 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) 2015 return false; 2016 2017 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ 2018 WARN_ON(del_timer_sync(&host->dto_timer)); 2019 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2020 2021 return true; 2022 } 2023 2024 static void dw_mci_tasklet_func(struct tasklet_struct *t) 2025 { 2026 struct dw_mci *host = from_tasklet(host, t, tasklet); 2027 struct mmc_data *data; 2028 struct mmc_command *cmd; 2029 struct mmc_request *mrq; 2030 enum dw_mci_state state; 2031 enum dw_mci_state prev_state; 2032 unsigned int err; 2033 2034 spin_lock(&host->lock); 2035 2036 state = host->state; 2037 data = host->data; 2038 mrq = host->mrq; 2039 2040 do { 2041 prev_state = state; 2042 2043 switch (state) { 2044 case STATE_IDLE: 2045 case STATE_WAITING_CMD11_DONE: 2046 break; 2047 2048 case STATE_SENDING_CMD11: 2049 case STATE_SENDING_CMD: 2050 if (!dw_mci_clear_pending_cmd_complete(host)) 2051 break; 2052 2053 cmd = host->cmd; 2054 host->cmd = NULL; 2055 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 2056 err = dw_mci_command_complete(host, cmd); 2057 if (cmd == mrq->sbc && !err) { 2058 __dw_mci_start_request(host, host->slot, 2059 mrq->cmd); 2060 goto unlock; 2061 } 2062 2063 if (cmd->data && err) { 2064 /* 2065 * During UHS tuning sequence, sending the stop 2066 * command after the response CRC error would 2067 * throw the system into a confused state 2068 * causing all future tuning phases to report 2069 * failure. 2070 * 2071 * In such case controller will move into a data 2072 * transfer state after a response error or 2073 * response CRC error. Let's let that finish 2074 * before trying to send a stop, so we'll go to 2075 * STATE_SENDING_DATA. 2076 * 2077 * Although letting the data transfer take place 2078 * will waste a bit of time (we already know 2079 * the command was bad), it can't cause any 2080 * errors since it's possible it would have 2081 * taken place anyway if this tasklet got 2082 * delayed. Allowing the transfer to take place 2083 * avoids races and keeps things simple. 2084 */ 2085 if (err != -ETIMEDOUT && 2086 host->dir_status == DW_MCI_RECV_STATUS) { 2087 state = STATE_SENDING_DATA; 2088 continue; 2089 } 2090 2091 send_stop_abort(host, data); 2092 dw_mci_stop_dma(host); 2093 state = STATE_SENDING_STOP; 2094 break; 2095 } 2096 2097 if (!cmd->data || err) { 2098 dw_mci_request_end(host, mrq); 2099 goto unlock; 2100 } 2101 2102 prev_state = state = STATE_SENDING_DATA; 2103 fallthrough; 2104 2105 case STATE_SENDING_DATA: 2106 /* 2107 * We could get a data error and never a transfer 2108 * complete so we'd better check for it here. 2109 * 2110 * Note that we don't really care if we also got a 2111 * transfer complete; stopping the DMA and sending an 2112 * abort won't hurt. 2113 */ 2114 if (test_and_clear_bit(EVENT_DATA_ERROR, 2115 &host->pending_events)) { 2116 if (!(host->data_status & (SDMMC_INT_DRTO | 2117 SDMMC_INT_EBE))) 2118 send_stop_abort(host, data); 2119 dw_mci_stop_dma(host); 2120 state = STATE_DATA_ERROR; 2121 break; 2122 } 2123 2124 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2125 &host->pending_events)) { 2126 /* 2127 * If all data-related interrupts don't come 2128 * within the given time in reading data state. 2129 */ 2130 if (host->dir_status == DW_MCI_RECV_STATUS) 2131 dw_mci_set_drto(host); 2132 break; 2133 } 2134 2135 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 2136 2137 /* 2138 * Handle an EVENT_DATA_ERROR that might have shown up 2139 * before the transfer completed. This might not have 2140 * been caught by the check above because the interrupt 2141 * could have gone off between the previous check and 2142 * the check for transfer complete. 2143 * 2144 * Technically this ought not be needed assuming we 2145 * get a DATA_COMPLETE eventually (we'll notice the 2146 * error and end the request), but it shouldn't hurt. 2147 * 2148 * This has the advantage of sending the stop command. 2149 */ 2150 if (test_and_clear_bit(EVENT_DATA_ERROR, 2151 &host->pending_events)) { 2152 if (!(host->data_status & (SDMMC_INT_DRTO | 2153 SDMMC_INT_EBE))) 2154 send_stop_abort(host, data); 2155 dw_mci_stop_dma(host); 2156 state = STATE_DATA_ERROR; 2157 break; 2158 } 2159 prev_state = state = STATE_DATA_BUSY; 2160 2161 fallthrough; 2162 2163 case STATE_DATA_BUSY: 2164 if (!dw_mci_clear_pending_data_complete(host)) { 2165 /* 2166 * If data error interrupt comes but data over 2167 * interrupt doesn't come within the given time. 2168 * in reading data state. 2169 */ 2170 if (host->dir_status == DW_MCI_RECV_STATUS) 2171 dw_mci_set_drto(host); 2172 break; 2173 } 2174 2175 dw_mci_stop_fault_timer(host); 2176 host->data = NULL; 2177 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 2178 err = dw_mci_data_complete(host, data); 2179 2180 if (!err) { 2181 if (!data->stop || mrq->sbc) { 2182 if (mrq->sbc && data->stop) 2183 data->stop->error = 0; 2184 dw_mci_request_end(host, mrq); 2185 goto unlock; 2186 } 2187 2188 /* stop command for open-ended transfer*/ 2189 if (data->stop) 2190 send_stop_abort(host, data); 2191 } else { 2192 /* 2193 * If we don't have a command complete now we'll 2194 * never get one since we just reset everything; 2195 * better end the request. 2196 * 2197 * If we do have a command complete we'll fall 2198 * through to the SENDING_STOP command and 2199 * everything will be peachy keen. 2200 */ 2201 if (!test_bit(EVENT_CMD_COMPLETE, 2202 &host->pending_events)) { 2203 host->cmd = NULL; 2204 dw_mci_request_end(host, mrq); 2205 goto unlock; 2206 } 2207 } 2208 2209 /* 2210 * If err has non-zero, 2211 * stop-abort command has been already issued. 2212 */ 2213 prev_state = state = STATE_SENDING_STOP; 2214 2215 fallthrough; 2216 2217 case STATE_SENDING_STOP: 2218 if (!dw_mci_clear_pending_cmd_complete(host)) 2219 break; 2220 2221 /* CMD error in data command */ 2222 if (mrq->cmd->error && mrq->data) 2223 dw_mci_reset(host); 2224 2225 dw_mci_stop_fault_timer(host); 2226 host->cmd = NULL; 2227 host->data = NULL; 2228 2229 if (!mrq->sbc && mrq->stop) 2230 dw_mci_command_complete(host, mrq->stop); 2231 else 2232 host->cmd_status = 0; 2233 2234 dw_mci_request_end(host, mrq); 2235 goto unlock; 2236 2237 case STATE_DATA_ERROR: 2238 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2239 &host->pending_events)) 2240 break; 2241 2242 state = STATE_DATA_BUSY; 2243 break; 2244 } 2245 } while (state != prev_state); 2246 2247 host->state = state; 2248 unlock: 2249 spin_unlock(&host->lock); 2250 2251 } 2252 2253 /* push final bytes to part_buf, only use during push */ 2254 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 2255 { 2256 memcpy((void *)&host->part_buf, buf, cnt); 2257 host->part_buf_count = cnt; 2258 } 2259 2260 /* append bytes to part_buf, only use during push */ 2261 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 2262 { 2263 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 2264 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 2265 host->part_buf_count += cnt; 2266 return cnt; 2267 } 2268 2269 /* pull first bytes from part_buf, only use during pull */ 2270 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 2271 { 2272 cnt = min_t(int, cnt, host->part_buf_count); 2273 if (cnt) { 2274 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 2275 cnt); 2276 host->part_buf_count -= cnt; 2277 host->part_buf_start += cnt; 2278 } 2279 return cnt; 2280 } 2281 2282 /* pull final bytes from the part_buf, assuming it's just been filled */ 2283 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 2284 { 2285 memcpy(buf, &host->part_buf, cnt); 2286 host->part_buf_start = cnt; 2287 host->part_buf_count = (1 << host->data_shift) - cnt; 2288 } 2289 2290 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 2291 { 2292 struct mmc_data *data = host->data; 2293 int init_cnt = cnt; 2294 2295 /* try and push anything in the part_buf */ 2296 if (unlikely(host->part_buf_count)) { 2297 int len = dw_mci_push_part_bytes(host, buf, cnt); 2298 2299 buf += len; 2300 cnt -= len; 2301 if (host->part_buf_count == 2) { 2302 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2303 host->part_buf_count = 0; 2304 } 2305 } 2306 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2307 if (unlikely((unsigned long)buf & 0x1)) { 2308 while (cnt >= 2) { 2309 u16 aligned_buf[64]; 2310 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2311 int items = len >> 1; 2312 int i; 2313 /* memcpy from input buffer into aligned buffer */ 2314 memcpy(aligned_buf, buf, len); 2315 buf += len; 2316 cnt -= len; 2317 /* push data from aligned buffer into fifo */ 2318 for (i = 0; i < items; ++i) 2319 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 2320 } 2321 } else 2322 #endif 2323 { 2324 u16 *pdata = buf; 2325 2326 for (; cnt >= 2; cnt -= 2) 2327 mci_fifo_writew(host->fifo_reg, *pdata++); 2328 buf = pdata; 2329 } 2330 /* put anything remaining in the part_buf */ 2331 if (cnt) { 2332 dw_mci_set_part_bytes(host, buf, cnt); 2333 /* Push data if we have reached the expected data length */ 2334 if ((data->bytes_xfered + init_cnt) == 2335 (data->blksz * data->blocks)) 2336 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2337 } 2338 } 2339 2340 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2341 { 2342 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2343 if (unlikely((unsigned long)buf & 0x1)) { 2344 while (cnt >= 2) { 2345 /* pull data from fifo into aligned buffer */ 2346 u16 aligned_buf[64]; 2347 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2348 int items = len >> 1; 2349 int i; 2350 2351 for (i = 0; i < items; ++i) 2352 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 2353 /* memcpy from aligned buffer into output buffer */ 2354 memcpy(buf, aligned_buf, len); 2355 buf += len; 2356 cnt -= len; 2357 } 2358 } else 2359 #endif 2360 { 2361 u16 *pdata = buf; 2362 2363 for (; cnt >= 2; cnt -= 2) 2364 *pdata++ = mci_fifo_readw(host->fifo_reg); 2365 buf = pdata; 2366 } 2367 if (cnt) { 2368 host->part_buf16 = mci_fifo_readw(host->fifo_reg); 2369 dw_mci_pull_final_bytes(host, buf, cnt); 2370 } 2371 } 2372 2373 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2374 { 2375 struct mmc_data *data = host->data; 2376 int init_cnt = cnt; 2377 2378 /* try and push anything in the part_buf */ 2379 if (unlikely(host->part_buf_count)) { 2380 int len = dw_mci_push_part_bytes(host, buf, cnt); 2381 2382 buf += len; 2383 cnt -= len; 2384 if (host->part_buf_count == 4) { 2385 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2386 host->part_buf_count = 0; 2387 } 2388 } 2389 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2390 if (unlikely((unsigned long)buf & 0x3)) { 2391 while (cnt >= 4) { 2392 u32 aligned_buf[32]; 2393 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2394 int items = len >> 2; 2395 int i; 2396 /* memcpy from input buffer into aligned buffer */ 2397 memcpy(aligned_buf, buf, len); 2398 buf += len; 2399 cnt -= len; 2400 /* push data from aligned buffer into fifo */ 2401 for (i = 0; i < items; ++i) 2402 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 2403 } 2404 } else 2405 #endif 2406 { 2407 u32 *pdata = buf; 2408 2409 for (; cnt >= 4; cnt -= 4) 2410 mci_fifo_writel(host->fifo_reg, *pdata++); 2411 buf = pdata; 2412 } 2413 /* put anything remaining in the part_buf */ 2414 if (cnt) { 2415 dw_mci_set_part_bytes(host, buf, cnt); 2416 /* Push data if we have reached the expected data length */ 2417 if ((data->bytes_xfered + init_cnt) == 2418 (data->blksz * data->blocks)) 2419 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2420 } 2421 } 2422 2423 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2424 { 2425 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2426 if (unlikely((unsigned long)buf & 0x3)) { 2427 while (cnt >= 4) { 2428 /* pull data from fifo into aligned buffer */ 2429 u32 aligned_buf[32]; 2430 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2431 int items = len >> 2; 2432 int i; 2433 2434 for (i = 0; i < items; ++i) 2435 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 2436 /* memcpy from aligned buffer into output buffer */ 2437 memcpy(buf, aligned_buf, len); 2438 buf += len; 2439 cnt -= len; 2440 } 2441 } else 2442 #endif 2443 { 2444 u32 *pdata = buf; 2445 2446 for (; cnt >= 4; cnt -= 4) 2447 *pdata++ = mci_fifo_readl(host->fifo_reg); 2448 buf = pdata; 2449 } 2450 if (cnt) { 2451 host->part_buf32 = mci_fifo_readl(host->fifo_reg); 2452 dw_mci_pull_final_bytes(host, buf, cnt); 2453 } 2454 } 2455 2456 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2457 { 2458 struct mmc_data *data = host->data; 2459 int init_cnt = cnt; 2460 2461 /* try and push anything in the part_buf */ 2462 if (unlikely(host->part_buf_count)) { 2463 int len = dw_mci_push_part_bytes(host, buf, cnt); 2464 2465 buf += len; 2466 cnt -= len; 2467 2468 if (host->part_buf_count == 8) { 2469 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2470 host->part_buf_count = 0; 2471 } 2472 } 2473 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2474 if (unlikely((unsigned long)buf & 0x7)) { 2475 while (cnt >= 8) { 2476 u64 aligned_buf[16]; 2477 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2478 int items = len >> 3; 2479 int i; 2480 /* memcpy from input buffer into aligned buffer */ 2481 memcpy(aligned_buf, buf, len); 2482 buf += len; 2483 cnt -= len; 2484 /* push data from aligned buffer into fifo */ 2485 for (i = 0; i < items; ++i) 2486 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 2487 } 2488 } else 2489 #endif 2490 { 2491 u64 *pdata = buf; 2492 2493 for (; cnt >= 8; cnt -= 8) 2494 mci_fifo_writeq(host->fifo_reg, *pdata++); 2495 buf = pdata; 2496 } 2497 /* put anything remaining in the part_buf */ 2498 if (cnt) { 2499 dw_mci_set_part_bytes(host, buf, cnt); 2500 /* Push data if we have reached the expected data length */ 2501 if ((data->bytes_xfered + init_cnt) == 2502 (data->blksz * data->blocks)) 2503 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2504 } 2505 } 2506 2507 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2508 { 2509 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2510 if (unlikely((unsigned long)buf & 0x7)) { 2511 while (cnt >= 8) { 2512 /* pull data from fifo into aligned buffer */ 2513 u64 aligned_buf[16]; 2514 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2515 int items = len >> 3; 2516 int i; 2517 2518 for (i = 0; i < items; ++i) 2519 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 2520 2521 /* memcpy from aligned buffer into output buffer */ 2522 memcpy(buf, aligned_buf, len); 2523 buf += len; 2524 cnt -= len; 2525 } 2526 } else 2527 #endif 2528 { 2529 u64 *pdata = buf; 2530 2531 for (; cnt >= 8; cnt -= 8) 2532 *pdata++ = mci_fifo_readq(host->fifo_reg); 2533 buf = pdata; 2534 } 2535 if (cnt) { 2536 host->part_buf = mci_fifo_readq(host->fifo_reg); 2537 dw_mci_pull_final_bytes(host, buf, cnt); 2538 } 2539 } 2540 2541 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 2542 { 2543 int len; 2544 2545 /* get remaining partial bytes */ 2546 len = dw_mci_pull_part_bytes(host, buf, cnt); 2547 if (unlikely(len == cnt)) 2548 return; 2549 buf += len; 2550 cnt -= len; 2551 2552 /* get the rest of the data */ 2553 host->pull_data(host, buf, cnt); 2554 } 2555 2556 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2557 { 2558 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2559 void *buf; 2560 unsigned int offset; 2561 struct mmc_data *data = host->data; 2562 int shift = host->data_shift; 2563 u32 status; 2564 unsigned int len; 2565 unsigned int remain, fcnt; 2566 2567 do { 2568 if (!sg_miter_next(sg_miter)) 2569 goto done; 2570 2571 host->sg = sg_miter->piter.sg; 2572 buf = sg_miter->addr; 2573 remain = sg_miter->length; 2574 offset = 0; 2575 2576 do { 2577 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2578 << shift) + host->part_buf_count; 2579 len = min(remain, fcnt); 2580 if (!len) 2581 break; 2582 dw_mci_pull_data(host, (void *)(buf + offset), len); 2583 data->bytes_xfered += len; 2584 offset += len; 2585 remain -= len; 2586 } while (remain); 2587 2588 sg_miter->consumed = offset; 2589 status = mci_readl(host, MINTSTS); 2590 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2591 /* if the RXDR is ready read again */ 2592 } while ((status & SDMMC_INT_RXDR) || 2593 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2594 2595 if (!remain) { 2596 if (!sg_miter_next(sg_miter)) 2597 goto done; 2598 sg_miter->consumed = 0; 2599 } 2600 sg_miter_stop(sg_miter); 2601 return; 2602 2603 done: 2604 sg_miter_stop(sg_miter); 2605 host->sg = NULL; 2606 smp_wmb(); /* drain writebuffer */ 2607 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2608 } 2609 2610 static void dw_mci_write_data_pio(struct dw_mci *host) 2611 { 2612 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2613 void *buf; 2614 unsigned int offset; 2615 struct mmc_data *data = host->data; 2616 int shift = host->data_shift; 2617 u32 status; 2618 unsigned int len; 2619 unsigned int fifo_depth = host->fifo_depth; 2620 unsigned int remain, fcnt; 2621 2622 do { 2623 if (!sg_miter_next(sg_miter)) 2624 goto done; 2625 2626 host->sg = sg_miter->piter.sg; 2627 buf = sg_miter->addr; 2628 remain = sg_miter->length; 2629 offset = 0; 2630 2631 do { 2632 fcnt = ((fifo_depth - 2633 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2634 << shift) - host->part_buf_count; 2635 len = min(remain, fcnt); 2636 if (!len) 2637 break; 2638 host->push_data(host, (void *)(buf + offset), len); 2639 data->bytes_xfered += len; 2640 offset += len; 2641 remain -= len; 2642 } while (remain); 2643 2644 sg_miter->consumed = offset; 2645 status = mci_readl(host, MINTSTS); 2646 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2647 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 2648 2649 if (!remain) { 2650 if (!sg_miter_next(sg_miter)) 2651 goto done; 2652 sg_miter->consumed = 0; 2653 } 2654 sg_miter_stop(sg_miter); 2655 return; 2656 2657 done: 2658 sg_miter_stop(sg_miter); 2659 host->sg = NULL; 2660 smp_wmb(); /* drain writebuffer */ 2661 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2662 } 2663 2664 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2665 { 2666 del_timer(&host->cto_timer); 2667 2668 if (!host->cmd_status) 2669 host->cmd_status = status; 2670 2671 smp_wmb(); /* drain writebuffer */ 2672 2673 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2674 tasklet_schedule(&host->tasklet); 2675 2676 dw_mci_start_fault_timer(host); 2677 } 2678 2679 static void dw_mci_handle_cd(struct dw_mci *host) 2680 { 2681 struct dw_mci_slot *slot = host->slot; 2682 2683 mmc_detect_change(slot->mmc, 2684 msecs_to_jiffies(host->pdata->detect_delay_ms)); 2685 } 2686 2687 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2688 { 2689 struct dw_mci *host = dev_id; 2690 u32 pending; 2691 struct dw_mci_slot *slot = host->slot; 2692 2693 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2694 2695 if (pending) { 2696 /* Check volt switch first, since it can look like an error */ 2697 if ((host->state == STATE_SENDING_CMD11) && 2698 (pending & SDMMC_INT_VOLT_SWITCH)) { 2699 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 2700 pending &= ~SDMMC_INT_VOLT_SWITCH; 2701 2702 /* 2703 * Hold the lock; we know cmd11_timer can't be kicked 2704 * off after the lock is released, so safe to delete. 2705 */ 2706 spin_lock(&host->irq_lock); 2707 dw_mci_cmd_interrupt(host, pending); 2708 spin_unlock(&host->irq_lock); 2709 2710 del_timer(&host->cmd11_timer); 2711 } 2712 2713 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2714 spin_lock(&host->irq_lock); 2715 2716 del_timer(&host->cto_timer); 2717 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2718 host->cmd_status = pending; 2719 smp_wmb(); /* drain writebuffer */ 2720 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2721 2722 spin_unlock(&host->irq_lock); 2723 } 2724 2725 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 2726 spin_lock(&host->irq_lock); 2727 2728 /* if there is an error report DATA_ERROR */ 2729 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2730 host->data_status = pending; 2731 smp_wmb(); /* drain writebuffer */ 2732 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2733 tasklet_schedule(&host->tasklet); 2734 2735 spin_unlock(&host->irq_lock); 2736 } 2737 2738 if (pending & SDMMC_INT_DATA_OVER) { 2739 spin_lock(&host->irq_lock); 2740 2741 del_timer(&host->dto_timer); 2742 2743 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2744 if (!host->data_status) 2745 host->data_status = pending; 2746 smp_wmb(); /* drain writebuffer */ 2747 if (host->dir_status == DW_MCI_RECV_STATUS) { 2748 if (host->sg != NULL) 2749 dw_mci_read_data_pio(host, true); 2750 } 2751 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2752 tasklet_schedule(&host->tasklet); 2753 2754 spin_unlock(&host->irq_lock); 2755 } 2756 2757 if (pending & SDMMC_INT_RXDR) { 2758 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2759 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 2760 dw_mci_read_data_pio(host, false); 2761 } 2762 2763 if (pending & SDMMC_INT_TXDR) { 2764 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2765 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2766 dw_mci_write_data_pio(host); 2767 } 2768 2769 if (pending & SDMMC_INT_CMD_DONE) { 2770 spin_lock(&host->irq_lock); 2771 2772 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2773 dw_mci_cmd_interrupt(host, pending); 2774 2775 spin_unlock(&host->irq_lock); 2776 } 2777 2778 if (pending & SDMMC_INT_CD) { 2779 mci_writel(host, RINTSTS, SDMMC_INT_CD); 2780 dw_mci_handle_cd(host); 2781 } 2782 2783 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 2784 mci_writel(host, RINTSTS, 2785 SDMMC_INT_SDIO(slot->sdio_id)); 2786 __dw_mci_enable_sdio_irq(slot, 0); 2787 sdio_signal_irq(slot->mmc); 2788 } 2789 2790 } 2791 2792 if (host->use_dma != TRANS_MODE_IDMAC) 2793 return IRQ_HANDLED; 2794 2795 /* Handle IDMA interrupts */ 2796 if (host->dma_64bit_address == 1) { 2797 pending = mci_readl(host, IDSTS64); 2798 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2799 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2800 SDMMC_IDMAC_INT_RI); 2801 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2802 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2803 host->dma_ops->complete((void *)host); 2804 } 2805 } else { 2806 pending = mci_readl(host, IDSTS); 2807 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2808 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2809 SDMMC_IDMAC_INT_RI); 2810 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2811 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2812 host->dma_ops->complete((void *)host); 2813 } 2814 } 2815 2816 return IRQ_HANDLED; 2817 } 2818 2819 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) 2820 { 2821 struct dw_mci *host = slot->host; 2822 const struct dw_mci_drv_data *drv_data = host->drv_data; 2823 struct mmc_host *mmc = slot->mmc; 2824 int ctrl_id; 2825 2826 if (host->pdata->caps) 2827 mmc->caps = host->pdata->caps; 2828 2829 if (host->pdata->pm_caps) 2830 mmc->pm_caps = host->pdata->pm_caps; 2831 2832 if (host->dev->of_node) { 2833 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2834 if (ctrl_id < 0) 2835 ctrl_id = 0; 2836 } else { 2837 ctrl_id = to_platform_device(host->dev)->id; 2838 } 2839 2840 if (drv_data && drv_data->caps) { 2841 if (ctrl_id >= drv_data->num_caps) { 2842 dev_err(host->dev, "invalid controller id %d\n", 2843 ctrl_id); 2844 return -EINVAL; 2845 } 2846 mmc->caps |= drv_data->caps[ctrl_id]; 2847 } 2848 2849 if (host->pdata->caps2) 2850 mmc->caps2 = host->pdata->caps2; 2851 2852 mmc->f_min = DW_MCI_FREQ_MIN; 2853 if (!mmc->f_max) 2854 mmc->f_max = DW_MCI_FREQ_MAX; 2855 2856 /* Process SDIO IRQs through the sdio_irq_work. */ 2857 if (mmc->caps & MMC_CAP_SDIO_IRQ) 2858 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2859 2860 return 0; 2861 } 2862 2863 static int dw_mci_init_slot(struct dw_mci *host) 2864 { 2865 struct mmc_host *mmc; 2866 struct dw_mci_slot *slot; 2867 int ret; 2868 2869 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2870 if (!mmc) 2871 return -ENOMEM; 2872 2873 slot = mmc_priv(mmc); 2874 slot->id = 0; 2875 slot->sdio_id = host->sdio_id0 + slot->id; 2876 slot->mmc = mmc; 2877 slot->host = host; 2878 host->slot = slot; 2879 2880 mmc->ops = &dw_mci_ops; 2881 2882 /*if there are external regulators, get them*/ 2883 ret = mmc_regulator_get_supply(mmc); 2884 if (ret) 2885 goto err_host_allocated; 2886 2887 if (!mmc->ocr_avail) 2888 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2889 2890 ret = mmc_of_parse(mmc); 2891 if (ret) 2892 goto err_host_allocated; 2893 2894 ret = dw_mci_init_slot_caps(slot); 2895 if (ret) 2896 goto err_host_allocated; 2897 2898 /* Useful defaults if platform data is unset. */ 2899 if (host->use_dma == TRANS_MODE_IDMAC) { 2900 mmc->max_segs = host->ring_size; 2901 mmc->max_blk_size = 65535; 2902 mmc->max_seg_size = 0x1000; 2903 mmc->max_req_size = mmc->max_seg_size * host->ring_size; 2904 mmc->max_blk_count = mmc->max_req_size / 512; 2905 } else if (host->use_dma == TRANS_MODE_EDMAC) { 2906 mmc->max_segs = 64; 2907 mmc->max_blk_size = 65535; 2908 mmc->max_blk_count = 65535; 2909 mmc->max_req_size = 2910 mmc->max_blk_size * mmc->max_blk_count; 2911 mmc->max_seg_size = mmc->max_req_size; 2912 } else { 2913 /* TRANS_MODE_PIO */ 2914 mmc->max_segs = 64; 2915 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ 2916 mmc->max_blk_count = 512; 2917 mmc->max_req_size = mmc->max_blk_size * 2918 mmc->max_blk_count; 2919 mmc->max_seg_size = mmc->max_req_size; 2920 } 2921 2922 dw_mci_get_cd(mmc); 2923 2924 ret = mmc_add_host(mmc); 2925 if (ret) 2926 goto err_host_allocated; 2927 2928 #if defined(CONFIG_DEBUG_FS) 2929 dw_mci_init_debugfs(slot); 2930 #endif 2931 2932 return 0; 2933 2934 err_host_allocated: 2935 mmc_free_host(mmc); 2936 return ret; 2937 } 2938 2939 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot) 2940 { 2941 /* Debugfs stuff is cleaned up by mmc core */ 2942 mmc_remove_host(slot->mmc); 2943 slot->host->slot = NULL; 2944 mmc_free_host(slot->mmc); 2945 } 2946 2947 static void dw_mci_init_dma(struct dw_mci *host) 2948 { 2949 int addr_config; 2950 struct device *dev = host->dev; 2951 2952 /* 2953 * Check tansfer mode from HCON[17:16] 2954 * Clear the ambiguous description of dw_mmc databook: 2955 * 2b'00: No DMA Interface -> Actually means using Internal DMA block 2956 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block 2957 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block 2958 * 2b'11: Non DW DMA Interface -> pio only 2959 * Compared to DesignWare DMA Interface, Generic DMA Interface has a 2960 * simpler request/acknowledge handshake mechanism and both of them 2961 * are regarded as external dma master for dw_mmc. 2962 */ 2963 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 2964 if (host->use_dma == DMA_INTERFACE_IDMA) { 2965 host->use_dma = TRANS_MODE_IDMAC; 2966 } else if (host->use_dma == DMA_INTERFACE_DWDMA || 2967 host->use_dma == DMA_INTERFACE_GDMA) { 2968 host->use_dma = TRANS_MODE_EDMAC; 2969 } else { 2970 goto no_dma; 2971 } 2972 2973 /* Determine which DMA interface to use */ 2974 if (host->use_dma == TRANS_MODE_IDMAC) { 2975 /* 2976 * Check ADDR_CONFIG bit in HCON to find 2977 * IDMAC address bus width 2978 */ 2979 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 2980 2981 if (addr_config == 1) { 2982 /* host supports IDMAC in 64-bit address mode */ 2983 host->dma_64bit_address = 1; 2984 dev_info(host->dev, 2985 "IDMAC supports 64-bit address mode.\n"); 2986 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 2987 dma_set_coherent_mask(host->dev, 2988 DMA_BIT_MASK(64)); 2989 } else { 2990 /* host supports IDMAC in 32-bit address mode */ 2991 host->dma_64bit_address = 0; 2992 dev_info(host->dev, 2993 "IDMAC supports 32-bit address mode.\n"); 2994 } 2995 2996 /* Alloc memory for sg translation */ 2997 host->sg_cpu = dmam_alloc_coherent(host->dev, 2998 DESC_RING_BUF_SZ, 2999 &host->sg_dma, GFP_KERNEL); 3000 if (!host->sg_cpu) { 3001 dev_err(host->dev, 3002 "%s: could not alloc DMA memory\n", 3003 __func__); 3004 goto no_dma; 3005 } 3006 3007 host->dma_ops = &dw_mci_idmac_ops; 3008 dev_info(host->dev, "Using internal DMA controller.\n"); 3009 } else { 3010 /* TRANS_MODE_EDMAC: check dma bindings again */ 3011 if ((device_property_read_string_array(dev, "dma-names", 3012 NULL, 0) < 0) || 3013 !device_property_present(dev, "dmas")) { 3014 goto no_dma; 3015 } 3016 host->dma_ops = &dw_mci_edmac_ops; 3017 dev_info(host->dev, "Using external DMA controller.\n"); 3018 } 3019 3020 if (host->dma_ops->init && host->dma_ops->start && 3021 host->dma_ops->stop && host->dma_ops->cleanup) { 3022 if (host->dma_ops->init(host)) { 3023 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 3024 __func__); 3025 goto no_dma; 3026 } 3027 } else { 3028 dev_err(host->dev, "DMA initialization not found.\n"); 3029 goto no_dma; 3030 } 3031 3032 return; 3033 3034 no_dma: 3035 dev_info(host->dev, "Using PIO mode.\n"); 3036 host->use_dma = TRANS_MODE_PIO; 3037 } 3038 3039 static void dw_mci_cmd11_timer(struct timer_list *t) 3040 { 3041 struct dw_mci *host = from_timer(host, t, cmd11_timer); 3042 3043 if (host->state != STATE_SENDING_CMD11) { 3044 dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 3045 return; 3046 } 3047 3048 host->cmd_status = SDMMC_INT_RTO; 3049 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 3050 tasklet_schedule(&host->tasklet); 3051 } 3052 3053 static void dw_mci_cto_timer(struct timer_list *t) 3054 { 3055 struct dw_mci *host = from_timer(host, t, cto_timer); 3056 unsigned long irqflags; 3057 u32 pending; 3058 3059 spin_lock_irqsave(&host->irq_lock, irqflags); 3060 3061 /* 3062 * If somehow we have very bad interrupt latency it's remotely possible 3063 * that the timer could fire while the interrupt is still pending or 3064 * while the interrupt is midway through running. Let's be paranoid 3065 * and detect those two cases. Note that this is paranoia is somewhat 3066 * justified because in this function we don't actually cancel the 3067 * pending command in the controller--we just assume it will never come. 3068 */ 3069 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 3070 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { 3071 /* The interrupt should fire; no need to act but we can warn */ 3072 dev_warn(host->dev, "Unexpected interrupt latency\n"); 3073 goto exit; 3074 } 3075 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { 3076 /* Presumably interrupt handler couldn't delete the timer */ 3077 dev_warn(host->dev, "CTO timeout when already completed\n"); 3078 goto exit; 3079 } 3080 3081 /* 3082 * Continued paranoia to make sure we're in the state we expect. 3083 * This paranoia isn't really justified but it seems good to be safe. 3084 */ 3085 switch (host->state) { 3086 case STATE_SENDING_CMD11: 3087 case STATE_SENDING_CMD: 3088 case STATE_SENDING_STOP: 3089 /* 3090 * If CMD_DONE interrupt does NOT come in sending command 3091 * state, we should notify the driver to terminate current 3092 * transfer and report a command timeout to the core. 3093 */ 3094 host->cmd_status = SDMMC_INT_RTO; 3095 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 3096 tasklet_schedule(&host->tasklet); 3097 break; 3098 default: 3099 dev_warn(host->dev, "Unexpected command timeout, state %d\n", 3100 host->state); 3101 break; 3102 } 3103 3104 exit: 3105 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3106 } 3107 3108 static void dw_mci_dto_timer(struct timer_list *t) 3109 { 3110 struct dw_mci *host = from_timer(host, t, dto_timer); 3111 unsigned long irqflags; 3112 u32 pending; 3113 3114 spin_lock_irqsave(&host->irq_lock, irqflags); 3115 3116 /* 3117 * The DTO timer is much longer than the CTO timer, so it's even less 3118 * likely that we'll these cases, but it pays to be paranoid. 3119 */ 3120 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 3121 if (pending & SDMMC_INT_DATA_OVER) { 3122 /* The interrupt should fire; no need to act but we can warn */ 3123 dev_warn(host->dev, "Unexpected data interrupt latency\n"); 3124 goto exit; 3125 } 3126 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { 3127 /* Presumably interrupt handler couldn't delete the timer */ 3128 dev_warn(host->dev, "DTO timeout when already completed\n"); 3129 goto exit; 3130 } 3131 3132 /* 3133 * Continued paranoia to make sure we're in the state we expect. 3134 * This paranoia isn't really justified but it seems good to be safe. 3135 */ 3136 switch (host->state) { 3137 case STATE_SENDING_DATA: 3138 case STATE_DATA_BUSY: 3139 /* 3140 * If DTO interrupt does NOT come in sending data state, 3141 * we should notify the driver to terminate current transfer 3142 * and report a data timeout to the core. 3143 */ 3144 host->data_status = SDMMC_INT_DRTO; 3145 set_bit(EVENT_DATA_ERROR, &host->pending_events); 3146 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 3147 tasklet_schedule(&host->tasklet); 3148 break; 3149 default: 3150 dev_warn(host->dev, "Unexpected data timeout, state %d\n", 3151 host->state); 3152 break; 3153 } 3154 3155 exit: 3156 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3157 } 3158 3159 #ifdef CONFIG_OF 3160 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3161 { 3162 struct dw_mci_board *pdata; 3163 struct device *dev = host->dev; 3164 const struct dw_mci_drv_data *drv_data = host->drv_data; 3165 int ret; 3166 u32 clock_frequency; 3167 3168 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 3169 if (!pdata) 3170 return ERR_PTR(-ENOMEM); 3171 3172 /* find reset controller when exist */ 3173 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset"); 3174 if (IS_ERR(pdata->rstc)) 3175 return ERR_CAST(pdata->rstc); 3176 3177 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) 3178 dev_info(dev, 3179 "fifo-depth property not found, using value of FIFOTH register as default\n"); 3180 3181 device_property_read_u32(dev, "card-detect-delay", 3182 &pdata->detect_delay_ms); 3183 3184 device_property_read_u32(dev, "data-addr", &host->data_addr_override); 3185 3186 if (device_property_present(dev, "fifo-watermark-aligned")) 3187 host->wm_aligned = true; 3188 3189 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency)) 3190 pdata->bus_hz = clock_frequency; 3191 3192 if (drv_data && drv_data->parse_dt) { 3193 ret = drv_data->parse_dt(host); 3194 if (ret) 3195 return ERR_PTR(ret); 3196 } 3197 3198 return pdata; 3199 } 3200 3201 #else /* CONFIG_OF */ 3202 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3203 { 3204 return ERR_PTR(-EINVAL); 3205 } 3206 #endif /* CONFIG_OF */ 3207 3208 static void dw_mci_enable_cd(struct dw_mci *host) 3209 { 3210 unsigned long irqflags; 3211 u32 temp; 3212 3213 /* 3214 * No need for CD if all slots have a non-error GPIO 3215 * as well as broken card detection is found. 3216 */ 3217 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL) 3218 return; 3219 3220 if (mmc_gpio_get_cd(host->slot->mmc) < 0) { 3221 spin_lock_irqsave(&host->irq_lock, irqflags); 3222 temp = mci_readl(host, INTMASK); 3223 temp |= SDMMC_INT_CD; 3224 mci_writel(host, INTMASK, temp); 3225 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3226 } 3227 } 3228 3229 int dw_mci_probe(struct dw_mci *host) 3230 { 3231 const struct dw_mci_drv_data *drv_data = host->drv_data; 3232 int width, i, ret = 0; 3233 u32 fifo_size; 3234 3235 if (!host->pdata) { 3236 host->pdata = dw_mci_parse_dt(host); 3237 if (IS_ERR(host->pdata)) 3238 return dev_err_probe(host->dev, PTR_ERR(host->pdata), 3239 "platform data not available\n"); 3240 } 3241 3242 host->biu_clk = devm_clk_get(host->dev, "biu"); 3243 if (IS_ERR(host->biu_clk)) { 3244 dev_dbg(host->dev, "biu clock not available\n"); 3245 } else { 3246 ret = clk_prepare_enable(host->biu_clk); 3247 if (ret) { 3248 dev_err(host->dev, "failed to enable biu clock\n"); 3249 return ret; 3250 } 3251 } 3252 3253 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3254 if (IS_ERR(host->ciu_clk)) { 3255 dev_dbg(host->dev, "ciu clock not available\n"); 3256 host->bus_hz = host->pdata->bus_hz; 3257 } else { 3258 ret = clk_prepare_enable(host->ciu_clk); 3259 if (ret) { 3260 dev_err(host->dev, "failed to enable ciu clock\n"); 3261 goto err_clk_biu; 3262 } 3263 3264 if (host->pdata->bus_hz) { 3265 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 3266 if (ret) 3267 dev_warn(host->dev, 3268 "Unable to set bus rate to %uHz\n", 3269 host->pdata->bus_hz); 3270 } 3271 host->bus_hz = clk_get_rate(host->ciu_clk); 3272 } 3273 3274 if (!host->bus_hz) { 3275 dev_err(host->dev, 3276 "Platform data must supply bus speed\n"); 3277 ret = -ENODEV; 3278 goto err_clk_ciu; 3279 } 3280 3281 if (host->pdata->rstc) { 3282 reset_control_assert(host->pdata->rstc); 3283 usleep_range(10, 50); 3284 reset_control_deassert(host->pdata->rstc); 3285 } 3286 3287 if (drv_data && drv_data->init) { 3288 ret = drv_data->init(host); 3289 if (ret) { 3290 dev_err(host->dev, 3291 "implementation specific init failed\n"); 3292 goto err_clk_ciu; 3293 } 3294 } 3295 3296 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); 3297 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); 3298 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); 3299 3300 spin_lock_init(&host->lock); 3301 spin_lock_init(&host->irq_lock); 3302 INIT_LIST_HEAD(&host->queue); 3303 3304 dw_mci_init_fault(host); 3305 3306 /* 3307 * Get the host data width - this assumes that HCON has been set with 3308 * the correct values. 3309 */ 3310 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3311 if (!i) { 3312 host->push_data = dw_mci_push_data16; 3313 host->pull_data = dw_mci_pull_data16; 3314 width = 16; 3315 host->data_shift = 1; 3316 } else if (i == 2) { 3317 host->push_data = dw_mci_push_data64; 3318 host->pull_data = dw_mci_pull_data64; 3319 width = 64; 3320 host->data_shift = 3; 3321 } else { 3322 /* Check for a reserved value, and warn if it is */ 3323 WARN((i != 1), 3324 "HCON reports a reserved host data width!\n" 3325 "Defaulting to 32-bit access.\n"); 3326 host->push_data = dw_mci_push_data32; 3327 host->pull_data = dw_mci_pull_data32; 3328 width = 32; 3329 host->data_shift = 2; 3330 } 3331 3332 /* Reset all blocks */ 3333 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3334 ret = -ENODEV; 3335 goto err_clk_ciu; 3336 } 3337 3338 host->dma_ops = host->pdata->dma_ops; 3339 dw_mci_init_dma(host); 3340 3341 /* Clear the interrupts for the host controller */ 3342 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3343 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3344 3345 /* Put in max timeout */ 3346 mci_writel(host, TMOUT, 0xFFFFFFFF); 3347 3348 /* 3349 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 3350 * Tx Mark = fifo_size / 2 DMA Size = 8 3351 */ 3352 if (!host->pdata->fifo_depth) { 3353 /* 3354 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3355 * have been overwritten by the bootloader, just like we're 3356 * about to do, so if you know the value for your hardware, you 3357 * should put it in the platform data. 3358 */ 3359 fifo_size = mci_readl(host, FIFOTH); 3360 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3361 } else { 3362 fifo_size = host->pdata->fifo_depth; 3363 } 3364 host->fifo_depth = fifo_size; 3365 host->fifoth_val = 3366 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3367 mci_writel(host, FIFOTH, host->fifoth_val); 3368 3369 /* disable clock to CIU */ 3370 mci_writel(host, CLKENA, 0); 3371 mci_writel(host, CLKSRC, 0); 3372 3373 /* 3374 * In 2.40a spec, Data offset is changed. 3375 * Need to check the version-id and set data-offset for DATA register. 3376 */ 3377 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 3378 dev_info(host->dev, "Version ID is %04x\n", host->verid); 3379 3380 if (host->data_addr_override) 3381 host->fifo_reg = host->regs + host->data_addr_override; 3382 else if (host->verid < DW_MMC_240A) 3383 host->fifo_reg = host->regs + DATA_OFFSET; 3384 else 3385 host->fifo_reg = host->regs + DATA_240A_OFFSET; 3386 3387 tasklet_setup(&host->tasklet, dw_mci_tasklet_func); 3388 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3389 host->irq_flags, "dw-mci", host); 3390 if (ret) 3391 goto err_dmaunmap; 3392 3393 /* 3394 * Enable interrupts for command done, data over, data empty, 3395 * receive ready and error such as transmit, receive timeout, crc error 3396 */ 3397 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3398 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3399 DW_MCI_ERROR_FLAGS); 3400 /* Enable mci interrupt */ 3401 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3402 3403 dev_info(host->dev, 3404 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 3405 host->irq, width, fifo_size); 3406 3407 /* We need at least one slot to succeed */ 3408 ret = dw_mci_init_slot(host); 3409 if (ret) { 3410 dev_dbg(host->dev, "slot %d init failed\n", i); 3411 goto err_dmaunmap; 3412 } 3413 3414 /* Now that slots are all setup, we can enable card detect */ 3415 dw_mci_enable_cd(host); 3416 3417 return 0; 3418 3419 err_dmaunmap: 3420 if (host->use_dma && host->dma_ops->exit) 3421 host->dma_ops->exit(host); 3422 3423 reset_control_assert(host->pdata->rstc); 3424 3425 err_clk_ciu: 3426 clk_disable_unprepare(host->ciu_clk); 3427 3428 err_clk_biu: 3429 clk_disable_unprepare(host->biu_clk); 3430 3431 return ret; 3432 } 3433 EXPORT_SYMBOL(dw_mci_probe); 3434 3435 void dw_mci_remove(struct dw_mci *host) 3436 { 3437 dev_dbg(host->dev, "remove slot\n"); 3438 if (host->slot) 3439 dw_mci_cleanup_slot(host->slot); 3440 3441 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3442 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3443 3444 /* disable clock to CIU */ 3445 mci_writel(host, CLKENA, 0); 3446 mci_writel(host, CLKSRC, 0); 3447 3448 if (host->use_dma && host->dma_ops->exit) 3449 host->dma_ops->exit(host); 3450 3451 reset_control_assert(host->pdata->rstc); 3452 3453 clk_disable_unprepare(host->ciu_clk); 3454 clk_disable_unprepare(host->biu_clk); 3455 } 3456 EXPORT_SYMBOL(dw_mci_remove); 3457 3458 3459 3460 #ifdef CONFIG_PM 3461 int dw_mci_runtime_suspend(struct device *dev) 3462 { 3463 struct dw_mci *host = dev_get_drvdata(dev); 3464 3465 if (host->use_dma && host->dma_ops->exit) 3466 host->dma_ops->exit(host); 3467 3468 clk_disable_unprepare(host->ciu_clk); 3469 3470 if (host->slot && 3471 (mmc_can_gpio_cd(host->slot->mmc) || 3472 !mmc_card_is_removable(host->slot->mmc))) 3473 clk_disable_unprepare(host->biu_clk); 3474 3475 return 0; 3476 } 3477 EXPORT_SYMBOL(dw_mci_runtime_suspend); 3478 3479 int dw_mci_runtime_resume(struct device *dev) 3480 { 3481 int ret = 0; 3482 struct dw_mci *host = dev_get_drvdata(dev); 3483 3484 if (host->slot && 3485 (mmc_can_gpio_cd(host->slot->mmc) || 3486 !mmc_card_is_removable(host->slot->mmc))) { 3487 ret = clk_prepare_enable(host->biu_clk); 3488 if (ret) 3489 return ret; 3490 } 3491 3492 ret = clk_prepare_enable(host->ciu_clk); 3493 if (ret) 3494 goto err; 3495 3496 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3497 clk_disable_unprepare(host->ciu_clk); 3498 ret = -ENODEV; 3499 goto err; 3500 } 3501 3502 if (host->use_dma && host->dma_ops->init) 3503 host->dma_ops->init(host); 3504 3505 /* 3506 * Restore the initial value at FIFOTH register 3507 * And Invalidate the prev_blksz with zero 3508 */ 3509 mci_writel(host, FIFOTH, host->fifoth_val); 3510 host->prev_blksz = 0; 3511 3512 /* Put in max timeout */ 3513 mci_writel(host, TMOUT, 0xFFFFFFFF); 3514 3515 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3516 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3517 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3518 DW_MCI_ERROR_FLAGS); 3519 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3520 3521 3522 if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER) 3523 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios); 3524 3525 /* Force setup bus to guarantee available clock output */ 3526 dw_mci_setup_bus(host->slot, true); 3527 3528 /* Re-enable SDIO interrupts. */ 3529 if (sdio_irq_claimed(host->slot->mmc)) 3530 __dw_mci_enable_sdio_irq(host->slot, 1); 3531 3532 /* Now that slots are all setup, we can enable card detect */ 3533 dw_mci_enable_cd(host); 3534 3535 return 0; 3536 3537 err: 3538 if (host->slot && 3539 (mmc_can_gpio_cd(host->slot->mmc) || 3540 !mmc_card_is_removable(host->slot->mmc))) 3541 clk_disable_unprepare(host->biu_clk); 3542 3543 return ret; 3544 } 3545 EXPORT_SYMBOL(dw_mci_runtime_resume); 3546 #endif /* CONFIG_PM */ 3547 3548 static int __init dw_mci_init(void) 3549 { 3550 pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 3551 return 0; 3552 } 3553 3554 static void __exit dw_mci_exit(void) 3555 { 3556 } 3557 3558 module_init(dw_mci_init); 3559 module_exit(dw_mci_exit); 3560 3561 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3562 MODULE_AUTHOR("NXP Semiconductor VietNam"); 3563 MODULE_AUTHOR("Imagination Technologies Ltd"); 3564 MODULE_LICENSE("GPL v2"); 3565