// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

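/*
 * Illustrative sizing note (editorial, assuming a 4 KiB PAGE_SIZE; not
 * from the databook): DESC_RING_BUF_SZ then holds 4096 / 16 = 256
 * 32-bit descriptors or 4096 / 32 = 128 64-bit descriptors. Since each
 * chained descriptor moves at most DW_MCI_DESC_DATA_LENGTH (4 KiB),
 * one pass over the ring can cover up to 1 MiB (32-bit descriptors) or
 * 512 KiB (64-bit descriptors) of a single transfer.
 */
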
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

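/*
 * Editorial note, paraphrasing the Synopsys databook: clock-related
 * registers (CLKDIV, CLKSRC, CLKENA) only take effect in the card clock
 * domain after an "update clock" command, i.e. mci_send_cmd() with
 * SDMMC_CMD_UPD_CLK set and no real command index. The controller
 * clears SDMMC_CMD_START once the new settings have been latched,
 * which is what the poll loop above waits for.
 */
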
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200 ||
	    cmdr == MMC_GEN_CMD) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

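/*
 * Editorial note on the SD_IO_RW_DIRECT (CMD52) abort argument built
 * above, decoded per the SDIO spec's CMD52 argument layout:
 *   bit 31     - R/W flag (1 = write)
 *   bits 30:28 - function number (0 = CCCR)
 *   bits 25:9  - register address (SDIO_CCCR_ABORT = 0x06)
 *   bits 7:0   - write data: the number of the function to abort,
 *                taken from bits 30:28 of the original CMD53 argument.
 * So the generated stop command writes the failing function's number
 * into the CCCR abort register.
 */
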
static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit of spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

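/*
 * Worked example for the timeout math above (illustrative numbers, not
 * from the databook): with bus_hz = 50 MHz, CLKDIV = 1 (so cto_div = 2)
 * and the maximum TMOUT[7:0] value of 255, the hardware timeout is
 * 255 * 2 / 50e6 s ~= 10.2 us, so cto_ms rounds up to 1 ms and the
 * timer fires roughly 11 ms later once the 10 ms of slack is added.
 */
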
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);

	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;

			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;

		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					      (sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      !(val & IDMAC_DES0_OWN),
						      10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	memset(&cfg, 0, sizeof(cfg));
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

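/*
 * Illustrative note on the checks above (example numbers, not from the
 * databook): DMA is refused for any transfer smaller than
 * DW_MCI_DMA_THRESHOLD (16) bytes, any block size that isn't a
 * multiple of 4, and any scatterlist entry whose offset or length
 * isn't 32-bit aligned. So a 512-byte block in a word-aligned buffer
 * is DMA-eligible, while a 6-byte SDIO transfer or a buffer starting
 * at offset 2 falls back to PIO.
 */
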
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
	    !mmc_card_is_removable(mmc)) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					 "card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					 "card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
		 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

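/*
 * Worked example for the watermark selection above (illustrative
 * numbers, not from the databook): with a 4-byte FIFO width, a FIFO
 * depth of 32 and blksz = 512, we get blksz_depth = 128, tx_wmark = 16
 * and tx_wmark_invers = 16. Scanning mszs[] from the largest entry,
 * 256, 128, 64 and 32 all fail the tx_wmark_invers % mszs[idx] test,
 * while 16 divides both 128 and 16, so msize = 3 (burst of 16) and
 * rx_wmark = 15 are programmed into FIFOTH.
 */
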
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset
	 * is in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * The card write threshold was introduced in 2.80a.
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set the watermark to match the data size.
		 * If the next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
		     !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
		    force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
			    slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

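/*
 * Worked example for the divider math above (illustrative numbers):
 * CLKDIV holds half the actual divider, so the card clock is
 * bus_hz / (2 * CLKDIV), with CLKDIV = 0 meaning bus_hz passes through
 * undivided. For bus_hz = 200 MHz and a requested clock of 25 MHz,
 * div = 200 / 25 = 8, then DIV_ROUND_UP(8, 2) = 4 is written to CLKDIV
 * and the actual clock is 200 MHz / (2 * 4) = 25 MHz. For a request of
 * 30 MHz the + 1 rounding gives div = 7 -> CLKDIV = 4 as well, keeping
 * the card at 25 MHz rather than over-clocking it.
 */
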
static void dw_mci_set_data_timeout(struct dw_mci *host,
				    unsigned int timeout_ns)
{
	u32 clk_div, tmout;
	u64 tmp;

	clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
	if (clk_div == 0)
		clk_div = 1;

	tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
	tmp = DIV_ROUND_UP_ULL(tmp, clk_div);

	/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
	tmout = 0xFF; /* Set maximum */

	/* TMOUT[31:8] (DATA_TIMEOUT) */
	if (!tmp || tmp > 0xFFFFFF)
		tmout |= (0xFFFFFF << 8);
	else
		tmout |= (tmp & 0xFFFFFF) << 8;

	mci_writel(host, TMOUT, tmout);
	dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
		timeout_ns, tmout >> 8);
}

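/*
 * Worked example for the conversion above (illustrative numbers): for
 * timeout_ns = 100 ms, bus_hz = 50 MHz and CLKDIV = 1 (clk_div = 2),
 * tmp = ceil(1e8 ns * 50e6 / 1e9) = 5,000,000 controller clocks, then
 * ceil(5,000,000 / 2) = 2,500,000 card clocks, which fits in
 * TMOUT[31:8]. Anything that doesn't fit saturates to the maximum
 * 0xFFFFFF.
 */
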
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_data_timeout(host, data->timeout_ns);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
	u32 clk_en_a_old;
	u32 clk_en_a;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */

	clk_en_a_old = mci_readl(host, CLKENA);
	if (prepare) {
		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old & ~clken_low_pwr;
	} else {
		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old | clken_low_pwr;
	}

	if (clk_en_a != clk_en_a_old) {
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
			     0);
	}
}

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	dw_mci_prepare_sdio_irq(slot, enb);
	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

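/*
 * Editorial usage note (paths are illustrative): with
 * CONFIG_FAULT_INJECTION enabled, the fail_data_crc attribute created
 * in dw_mci_init_debugfs() exposes the standard fault_attr knobs, so
 * something like
 *   echo 10 > /sys/kernel/debug/mmc0/fail_data_crc/probability
 *   echo -1 > /sys/kernel/debug/mmc0/fail_data_crc/times
 * makes should_fail() arm the hrtimer below on ~10% of multi-block
 * data transfers, injecting a fake SDMMC_INT_DCRC error.
 */
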
#ifdef CONFIG_FAULT_INJECTION
static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
{
	struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	/*
	 * Only inject an error if we haven't already got an error or data over
	 * interrupt.
	 */
	if (!host->data_status) {
		host->data_status = SDMMC_INT_DCRC;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}

	spin_unlock_irqrestore(&host->irq_lock, flags);

	return HRTIMER_NORESTART;
}

static void dw_mci_start_fault_timer(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (!data || data->blocks <= 1)
		return;

	if (!should_fail(&host->fail_data_crc, 1))
		return;

	/*
	 * Try to inject the error at random points during the data transfer.
	 */
	hrtimer_start(&host->fault_timer,
		      ms_to_ktime(prandom_u32() % 25),
		      HRTIMER_MODE_REL);
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
	hrtimer_cancel(&host->fault_timer);
}

static void dw_mci_init_fault(struct dw_mci *host)
{
	host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;

	hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	host->fault_timer.function = dw_mci_fault_timer;
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
{
}

static void dw_mci_start_fault_timer(struct dw_mci *host)
{
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
}
#endif

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

2051 static void dw_mci_tasklet_func(struct tasklet_struct *t) 2052 { 2053 struct dw_mci *host = from_tasklet(host, t, tasklet); 2054 struct mmc_data *data; 2055 struct mmc_command *cmd; 2056 struct mmc_request *mrq; 2057 enum dw_mci_state state; 2058 enum dw_mci_state prev_state; 2059 unsigned int err; 2060 2061 spin_lock(&host->lock); 2062 2063 state = host->state; 2064 data = host->data; 2065 mrq = host->mrq; 2066 2067 do { 2068 prev_state = state; 2069 2070 switch (state) { 2071 case STATE_IDLE: 2072 case STATE_WAITING_CMD11_DONE: 2073 break; 2074 2075 case STATE_SENDING_CMD11: 2076 case STATE_SENDING_CMD: 2077 if (!dw_mci_clear_pending_cmd_complete(host)) 2078 break; 2079 2080 cmd = host->cmd; 2081 host->cmd = NULL; 2082 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 2083 err = dw_mci_command_complete(host, cmd); 2084 if (cmd == mrq->sbc && !err) { 2085 __dw_mci_start_request(host, host->slot, 2086 mrq->cmd); 2087 goto unlock; 2088 } 2089
2090 if (cmd->data && err) { 2091 /* 2092 * During the UHS tuning sequence, sending the stop 2093 * command after a response CRC error would 2094 * throw the system into a confused state, 2095 * causing all future tuning phases to report 2096 * failure. 2097 * 2098 * In such a case the controller will move into a data 2099 * transfer state after a response error or 2100 * response CRC error. Let's let that finish 2101 * before trying to send a stop, so we'll go to 2102 * STATE_SENDING_DATA. 2103 * 2104 * Although letting the data transfer take place 2105 * will waste a bit of time (we already know 2106 * the command was bad), it can't cause any 2107 * errors since it's possible it would have 2108 * taken place anyway if this tasklet got 2109 * delayed. Allowing the transfer to take place 2110 * avoids races and keeps things simple. 2111 */ 2112 if (err != -ETIMEDOUT && 2113 host->dir_status == DW_MCI_RECV_STATUS) { 2114 state = STATE_SENDING_DATA; 2115 continue; 2116 } 2117 2118 send_stop_abort(host, data); 2119 dw_mci_stop_dma(host); 2120 state = STATE_SENDING_STOP; 2121 break; 2122 } 2123 2124 if (!cmd->data || err) { 2125 dw_mci_request_end(host, mrq); 2126 goto unlock; 2127 } 2128 2129 prev_state = state = STATE_SENDING_DATA; 2130 fallthrough; 2131
2132 case STATE_SENDING_DATA: 2133 /* 2134 * We could get a data error and never a transfer 2135 * complete so we'd better check for it here. 2136 * 2137 * Note that we don't really care if we also got a 2138 * transfer complete; stopping the DMA and sending an 2139 * abort won't hurt. 2140 */ 2141 if (test_and_clear_bit(EVENT_DATA_ERROR, 2142 &host->pending_events)) { 2143 if (!(host->data_status & (SDMMC_INT_DRTO | 2144 SDMMC_INT_EBE))) 2145 send_stop_abort(host, data); 2146 dw_mci_stop_dma(host); 2147 state = STATE_DATA_ERROR; 2148 break; 2149 } 2150 2151 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2152 &host->pending_events)) { 2153 /* 2154 * Arm the data-read timeout if the data-related 2155 * interrupts don't all arrive within the given time. 2156 */ 2157 if (host->dir_status == DW_MCI_RECV_STATUS) 2158 dw_mci_set_drto(host); 2159 break; 2160 } 2161 2162 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 2163 2164 /* 2165 * Handle an EVENT_DATA_ERROR that might have shown up 2166 * before the transfer completed. This might not have 2167 * been caught by the check above because the interrupt 2168 * could have gone off between the previous check and 2169 * the check for transfer complete. 2170 * 2171 * Technically this ought not be needed assuming we 2172 * get a DATA_COMPLETE eventually (we'll notice the 2173 * error and end the request), but it shouldn't hurt. 2174 * 2175 * This has the advantage of sending the stop command. 2176 */ 2177 if (test_and_clear_bit(EVENT_DATA_ERROR, 2178 &host->pending_events)) { 2179 if (!(host->data_status & (SDMMC_INT_DRTO | 2180 SDMMC_INT_EBE))) 2181 send_stop_abort(host, data); 2182 dw_mci_stop_dma(host); 2183 state = STATE_DATA_ERROR; 2184 break; 2185 } 2186 prev_state = state = STATE_DATA_BUSY; 2187 2188 fallthrough; 2189 2190 case STATE_DATA_BUSY: 2191 if (!dw_mci_clear_pending_data_complete(host)) { 2192 /* 2193 * Arm the data-read timeout if the data error 2194 * interrupt came but the data over interrupt 2195 * doesn't arrive within the given time.
2196 */ 2197 if (host->dir_status == DW_MCI_RECV_STATUS) 2198 dw_mci_set_drto(host); 2199 break; 2200 } 2201 2202 dw_mci_stop_fault_timer(host); 2203 host->data = NULL; 2204 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 2205 err = dw_mci_data_complete(host, data); 2206 2207 if (!err) { 2208 if (!data->stop || mrq->sbc) { 2209 if (mrq->sbc && data->stop) 2210 data->stop->error = 0; 2211 dw_mci_request_end(host, mrq); 2212 goto unlock; 2213 } 2214 2215 /* stop command for an open-ended transfer */ 2216 if (data->stop) 2217 send_stop_abort(host, data); 2218 } else { 2219 /* 2220 * If we don't have a command complete now we'll 2221 * never get one since we just reset everything; 2222 * better end the request. 2223 * 2224 * If we do have a command complete we'll fall 2225 * through to the SENDING_STOP command and 2226 * everything will be peachy keen. 2227 */ 2228 if (!test_bit(EVENT_CMD_COMPLETE, 2229 &host->pending_events)) { 2230 host->cmd = NULL; 2231 dw_mci_request_end(host, mrq); 2232 goto unlock; 2233 } 2234 } 2235 2236 /* 2237 * If err is non-zero, the stop-abort 2238 * command has already been issued. 2239 */ 2240 prev_state = state = STATE_SENDING_STOP; 2241 2242 fallthrough; 2243
2244 case STATE_SENDING_STOP: 2245 if (!dw_mci_clear_pending_cmd_complete(host)) 2246 break; 2247 2248 /* CMD error in data command */ 2249 if (mrq->cmd->error && mrq->data) 2250 dw_mci_reset(host); 2251 2252 dw_mci_stop_fault_timer(host); 2253 host->cmd = NULL; 2254 host->data = NULL; 2255 2256 if (!mrq->sbc && mrq->stop) 2257 dw_mci_command_complete(host, mrq->stop); 2258 else 2259 host->cmd_status = 0; 2260 2261 dw_mci_request_end(host, mrq); 2262 goto unlock; 2263 2264 case STATE_DATA_ERROR: 2265 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2266 &host->pending_events)) 2267 break; 2268 2269 state = STATE_DATA_BUSY; 2270 break; 2271 } 2272 } while (state != prev_state); 2273 2274 host->state = state; 2275 unlock: 2276 spin_unlock(&host->lock); 2277 2278 } 2279
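/*
 * Editorial worked example (a sketch, assuming a 32-bit FIFO, i.e.
 * host->data_shift == 2): pushing a 7-byte buffer writes one full 32-bit
 * word to the FIFO and parks the remaining 3 bytes in host->part_buf via
 * dw_mci_set_part_bytes(); a later push (or the end-of-transfer check in
 * dw_mci_push_data32()) flushes the partial word. The pull path mirrors
 * this: a trailing short read stashes the surplus FIFO bytes with
 * dw_mci_pull_final_bytes() so that the next dw_mci_pull_data() call
 * drains host->part_buf before touching the FIFO again.
 */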
2280 /* push final bytes to part_buf, only use during push */ 2281 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 2282 { 2283 memcpy((void *)&host->part_buf, buf, cnt); 2284 host->part_buf_count = cnt; 2285 } 2286 2287 /* append bytes to part_buf, only use during push */ 2288 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 2289 { 2290 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 2291 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 2292 host->part_buf_count += cnt; 2293 return cnt; 2294 } 2295 2296 /* pull first bytes from part_buf, only use during pull */ 2297 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 2298 { 2299 cnt = min_t(int, cnt, host->part_buf_count); 2300 if (cnt) { 2301 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 2302 cnt); 2303 host->part_buf_count -= cnt; 2304 host->part_buf_start += cnt; 2305 } 2306 return cnt; 2307 } 2308 2309 /* pull final bytes from the part_buf, assuming it's just been filled */ 2310 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 2311 { 2312 memcpy(buf, &host->part_buf, cnt); 2313 host->part_buf_start = cnt; 2314 host->part_buf_count = (1 << host->data_shift) - cnt; 2315 } 2316
2317 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 2318 { 2319 struct mmc_data *data = host->data; 2320 int init_cnt = cnt; 2321 2322 /* try and push anything in the part_buf */ 2323 if (unlikely(host->part_buf_count)) { 2324 int len = dw_mci_push_part_bytes(host, buf, cnt); 2325 2326 buf += len; 2327 cnt -= len; 2328 if (host->part_buf_count == 2) { 2329 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2330 host->part_buf_count = 0; 2331 } 2332 } 2333 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2334 if (unlikely((unsigned long)buf & 0x1)) { 2335 while (cnt >= 2) { 2336 u16 aligned_buf[64]; 2337 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2338 int items = len >> 1; 2339 int i; 2340 /* memcpy from input buffer into aligned buffer */ 2341 memcpy(aligned_buf, buf, len); 2342 buf += len; 2343 cnt -= len; 2344 /* push data from aligned buffer into fifo */ 2345 for (i = 0; i < items; ++i) 2346 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 2347 } 2348 } else 2349 #endif 2350 { 2351 u16 *pdata = buf; 2352 2353 for (; cnt >= 2; cnt -= 2) 2354 mci_fifo_writew(host->fifo_reg, *pdata++); 2355 buf = pdata; 2356 } 2357 /* put anything remaining in the part_buf */ 2358 if (cnt) { 2359 dw_mci_set_part_bytes(host, buf, cnt); 2360 /* Push data if we have reached the expected data length */ 2361 if ((data->bytes_xfered + init_cnt) == 2362 (data->blksz * data->blocks)) 2363 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2364 } 2365 } 2366
2367 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2368 { 2369 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2370 if (unlikely((unsigned long)buf & 0x1)) { 2371 while (cnt >= 2) { 2372 /* pull data from fifo into aligned buffer */ 2373 u16 aligned_buf[64]; 2374 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2375 int items = len >> 1; 2376 int i; 2377 2378 for (i = 0; i < items; ++i) 2379 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 2380 /* memcpy from aligned buffer into output buffer */ 2381 memcpy(buf, aligned_buf, len); 2382 buf += len; 2383 cnt -= len; 2384 } 2385 } else 2386 #endif 2387 { 2388 u16 *pdata = buf; 2389 2390 for (; cnt >= 2; cnt -= 2) 2391 *pdata++ = mci_fifo_readw(host->fifo_reg); 2392 buf = pdata; 2393 } 2394 if (cnt) { 2395 host->part_buf16 = mci_fifo_readw(host->fifo_reg); 2396 dw_mci_pull_final_bytes(host, buf, cnt); 2397 } 2398 } 2399
2400 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2401 { 2402 struct mmc_data *data = host->data; 2403 int init_cnt = cnt; 2404 2405 /* try and push anything in the part_buf */ 2406 if (unlikely(host->part_buf_count)) { 2407 int len = dw_mci_push_part_bytes(host, buf, cnt); 2408 2409 buf += len; 2410 cnt -= len; 2411 if (host->part_buf_count == 4) { 2412 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2413 host->part_buf_count = 0; 2414 } 2415 } 2416 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2417 if (unlikely((unsigned long)buf & 0x3)) { 2418 while (cnt >= 4) { 2419 u32 aligned_buf[32]; 2420 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2421 int items = len >> 2; 2422 int i; 2423 /* memcpy from input buffer into aligned buffer */ 2424 memcpy(aligned_buf, buf, len); 2425 buf += len; 2426 cnt -= len; 2427 /* push data from aligned buffer into fifo */ 2428 for (i = 0; i < items; ++i) 2429 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 2430 } 2431 } else 2432 #endif 2433 { 2434 u32 *pdata = buf; 2435 2436 for (; cnt >= 4; cnt -= 4) 2437 mci_fifo_writel(host->fifo_reg, *pdata++); 2438 buf = pdata; 2439 } 2440 /* put anything remaining in the part_buf */ 2441 if (cnt) { 2442 dw_mci_set_part_bytes(host, buf, cnt); 2443 /* Push data if we have reached the expected data length */ 2444 if ((data->bytes_xfered + init_cnt) == 2445 (data->blksz * data->blocks)) 2446
mci_fifo_writel(host->fifo_reg, host->part_buf32); 2447 } 2448 } 2449 2450 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2451 { 2452 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2453 if (unlikely((unsigned long)buf & 0x3)) { 2454 while (cnt >= 4) { 2455 /* pull data from fifo into aligned buffer */ 2456 u32 aligned_buf[32]; 2457 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2458 int items = len >> 2; 2459 int i; 2460 2461 for (i = 0; i < items; ++i) 2462 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 2463 /* memcpy from aligned buffer into output buffer */ 2464 memcpy(buf, aligned_buf, len); 2465 buf += len; 2466 cnt -= len; 2467 } 2468 } else 2469 #endif 2470 { 2471 u32 *pdata = buf; 2472 2473 for (; cnt >= 4; cnt -= 4) 2474 *pdata++ = mci_fifo_readl(host->fifo_reg); 2475 buf = pdata; 2476 } 2477 if (cnt) { 2478 host->part_buf32 = mci_fifo_readl(host->fifo_reg); 2479 dw_mci_pull_final_bytes(host, buf, cnt); 2480 } 2481 } 2482 2483 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2484 { 2485 struct mmc_data *data = host->data; 2486 int init_cnt = cnt; 2487 2488 /* try and push anything in the part_buf */ 2489 if (unlikely(host->part_buf_count)) { 2490 int len = dw_mci_push_part_bytes(host, buf, cnt); 2491 2492 buf += len; 2493 cnt -= len; 2494 2495 if (host->part_buf_count == 8) { 2496 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2497 host->part_buf_count = 0; 2498 } 2499 } 2500 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2501 if (unlikely((unsigned long)buf & 0x7)) { 2502 while (cnt >= 8) { 2503 u64 aligned_buf[16]; 2504 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2505 int items = len >> 3; 2506 int i; 2507 /* memcpy from input buffer into aligned buffer */ 2508 memcpy(aligned_buf, buf, len); 2509 buf += len; 2510 cnt -= len; 2511 /* push data from aligned buffer into fifo */ 2512 for (i = 0; i < items; ++i) 2513 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 2514 } 2515 } else 2516 #endif 2517 { 2518 u64 *pdata = buf; 2519 2520 for (; cnt >= 8; cnt -= 8) 2521 mci_fifo_writeq(host->fifo_reg, *pdata++); 2522 buf = pdata; 2523 } 2524 /* put anything remaining in the part_buf */ 2525 if (cnt) { 2526 dw_mci_set_part_bytes(host, buf, cnt); 2527 /* Push data if we have reached the expected data length */ 2528 if ((data->bytes_xfered + init_cnt) == 2529 (data->blksz * data->blocks)) 2530 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2531 } 2532 } 2533 2534 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2535 { 2536 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2537 if (unlikely((unsigned long)buf & 0x7)) { 2538 while (cnt >= 8) { 2539 /* pull data from fifo into aligned buffer */ 2540 u64 aligned_buf[16]; 2541 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2542 int items = len >> 3; 2543 int i; 2544 2545 for (i = 0; i < items; ++i) 2546 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 2547 2548 /* memcpy from aligned buffer into output buffer */ 2549 memcpy(buf, aligned_buf, len); 2550 buf += len; 2551 cnt -= len; 2552 } 2553 } else 2554 #endif 2555 { 2556 u64 *pdata = buf; 2557 2558 for (; cnt >= 8; cnt -= 8) 2559 *pdata++ = mci_fifo_readq(host->fifo_reg); 2560 buf = pdata; 2561 } 2562 if (cnt) { 2563 host->part_buf = mci_fifo_readq(host->fifo_reg); 2564 dw_mci_pull_final_bytes(host, buf, cnt); 2565 } 2566 } 2567 2568 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 2569 { 2570 int len; 2571 2572 /* get remaining partial bytes */ 2573 len = dw_mci_pull_part_bytes(host, 
buf, cnt); 2574 if (unlikely(len == cnt)) 2575 return; 2576 buf += len; 2577 cnt -= len; 2578 2579 /* get the rest of the data */ 2580 host->pull_data(host, buf, cnt); 2581 } 2582 2583 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2584 { 2585 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2586 void *buf; 2587 unsigned int offset; 2588 struct mmc_data *data = host->data; 2589 int shift = host->data_shift; 2590 u32 status; 2591 unsigned int len; 2592 unsigned int remain, fcnt; 2593 2594 do { 2595 if (!sg_miter_next(sg_miter)) 2596 goto done; 2597 2598 host->sg = sg_miter->piter.sg; 2599 buf = sg_miter->addr; 2600 remain = sg_miter->length; 2601 offset = 0; 2602 2603 do { 2604 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2605 << shift) + host->part_buf_count; 2606 len = min(remain, fcnt); 2607 if (!len) 2608 break; 2609 dw_mci_pull_data(host, (void *)(buf + offset), len); 2610 data->bytes_xfered += len; 2611 offset += len; 2612 remain -= len; 2613 } while (remain); 2614 2615 sg_miter->consumed = offset; 2616 status = mci_readl(host, MINTSTS); 2617 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2618 /* if the RXDR is ready read again */ 2619 } while ((status & SDMMC_INT_RXDR) || 2620 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2621 2622 if (!remain) { 2623 if (!sg_miter_next(sg_miter)) 2624 goto done; 2625 sg_miter->consumed = 0; 2626 } 2627 sg_miter_stop(sg_miter); 2628 return; 2629 2630 done: 2631 sg_miter_stop(sg_miter); 2632 host->sg = NULL; 2633 smp_wmb(); /* drain writebuffer */ 2634 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2635 } 2636 2637 static void dw_mci_write_data_pio(struct dw_mci *host) 2638 { 2639 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2640 void *buf; 2641 unsigned int offset; 2642 struct mmc_data *data = host->data; 2643 int shift = host->data_shift; 2644 u32 status; 2645 unsigned int len; 2646 unsigned int fifo_depth = host->fifo_depth; 2647 unsigned int remain, fcnt; 2648 2649 do { 2650 if (!sg_miter_next(sg_miter)) 2651 goto done; 2652 2653 host->sg = sg_miter->piter.sg; 2654 buf = sg_miter->addr; 2655 remain = sg_miter->length; 2656 offset = 0; 2657 2658 do { 2659 fcnt = ((fifo_depth - 2660 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2661 << shift) - host->part_buf_count; 2662 len = min(remain, fcnt); 2663 if (!len) 2664 break; 2665 host->push_data(host, (void *)(buf + offset), len); 2666 data->bytes_xfered += len; 2667 offset += len; 2668 remain -= len; 2669 } while (remain); 2670 2671 sg_miter->consumed = offset; 2672 status = mci_readl(host, MINTSTS); 2673 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2674 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 2675 2676 if (!remain) { 2677 if (!sg_miter_next(sg_miter)) 2678 goto done; 2679 sg_miter->consumed = 0; 2680 } 2681 sg_miter_stop(sg_miter); 2682 return; 2683 2684 done: 2685 sg_miter_stop(sg_miter); 2686 host->sg = NULL; 2687 smp_wmb(); /* drain writebuffer */ 2688 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2689 } 2690 2691 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2692 { 2693 del_timer(&host->cto_timer); 2694 2695 if (!host->cmd_status) 2696 host->cmd_status = status; 2697 2698 smp_wmb(); /* drain writebuffer */ 2699 2700 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2701 tasklet_schedule(&host->tasklet); 2702 2703 dw_mci_start_fault_timer(host); 2704 } 2705 2706 static void dw_mci_handle_cd(struct dw_mci *host) 2707 { 2708 struct dw_mci_slot *slot = host->slot; 2709 2710 mmc_detect_change(slot->mmc, 2711 
msecs_to_jiffies(host->pdata->detect_delay_ms)); 2712 } 2713 2714 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2715 { 2716 struct dw_mci *host = dev_id; 2717 u32 pending; 2718 struct dw_mci_slot *slot = host->slot; 2719 2720 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2721 2722 if (pending) { 2723 /* Check volt switch first, since it can look like an error */ 2724 if ((host->state == STATE_SENDING_CMD11) && 2725 (pending & SDMMC_INT_VOLT_SWITCH)) { 2726 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 2727 pending &= ~SDMMC_INT_VOLT_SWITCH; 2728 2729 /* 2730 * Hold the lock; we know cmd11_timer can't be kicked 2731 * off after the lock is released, so safe to delete. 2732 */ 2733 spin_lock(&host->irq_lock); 2734 dw_mci_cmd_interrupt(host, pending); 2735 spin_unlock(&host->irq_lock); 2736 2737 del_timer(&host->cmd11_timer); 2738 } 2739 2740 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2741 spin_lock(&host->irq_lock); 2742 2743 del_timer(&host->cto_timer); 2744 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2745 host->cmd_status = pending; 2746 smp_wmb(); /* drain writebuffer */ 2747 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2748 2749 spin_unlock(&host->irq_lock); 2750 } 2751 2752 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 2753 spin_lock(&host->irq_lock); 2754 2755 /* if there is an error report DATA_ERROR */ 2756 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2757 host->data_status = pending; 2758 smp_wmb(); /* drain writebuffer */ 2759 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2760 tasklet_schedule(&host->tasklet); 2761 2762 spin_unlock(&host->irq_lock); 2763 } 2764 2765 if (pending & SDMMC_INT_DATA_OVER) { 2766 spin_lock(&host->irq_lock); 2767 2768 del_timer(&host->dto_timer); 2769 2770 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2771 if (!host->data_status) 2772 host->data_status = pending; 2773 smp_wmb(); /* drain writebuffer */ 2774 if (host->dir_status == DW_MCI_RECV_STATUS) { 2775 if (host->sg != NULL) 2776 dw_mci_read_data_pio(host, true); 2777 } 2778 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2779 tasklet_schedule(&host->tasklet); 2780 2781 spin_unlock(&host->irq_lock); 2782 } 2783 2784 if (pending & SDMMC_INT_RXDR) { 2785 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2786 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 2787 dw_mci_read_data_pio(host, false); 2788 } 2789 2790 if (pending & SDMMC_INT_TXDR) { 2791 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2792 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2793 dw_mci_write_data_pio(host); 2794 } 2795 2796 if (pending & SDMMC_INT_CMD_DONE) { 2797 spin_lock(&host->irq_lock); 2798 2799 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2800 dw_mci_cmd_interrupt(host, pending); 2801 2802 spin_unlock(&host->irq_lock); 2803 } 2804 2805 if (pending & SDMMC_INT_CD) { 2806 mci_writel(host, RINTSTS, SDMMC_INT_CD); 2807 dw_mci_handle_cd(host); 2808 } 2809 2810 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 2811 mci_writel(host, RINTSTS, 2812 SDMMC_INT_SDIO(slot->sdio_id)); 2813 __dw_mci_enable_sdio_irq(slot, 0); 2814 sdio_signal_irq(slot->mmc); 2815 } 2816 2817 } 2818 2819 if (host->use_dma != TRANS_MODE_IDMAC) 2820 return IRQ_HANDLED; 2821 2822 /* Handle IDMA interrupts */ 2823 if (host->dma_64bit_address == 1) { 2824 pending = mci_readl(host, IDSTS64); 2825 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2826 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2827 SDMMC_IDMAC_INT_RI); 2828 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2829 if 
(!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2830 host->dma_ops->complete((void *)host); 2831 } 2832 } else { 2833 pending = mci_readl(host, IDSTS); 2834 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2835 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2836 SDMMC_IDMAC_INT_RI); 2837 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2838 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2839 host->dma_ops->complete((void *)host); 2840 } 2841 } 2842 2843 return IRQ_HANDLED; 2844 } 2845
2846 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) 2847 { 2848 struct dw_mci *host = slot->host; 2849 const struct dw_mci_drv_data *drv_data = host->drv_data; 2850 struct mmc_host *mmc = slot->mmc; 2851 int ctrl_id; 2852 2853 if (host->pdata->caps) 2854 mmc->caps = host->pdata->caps; 2855 2856 if (host->pdata->pm_caps) 2857 mmc->pm_caps = host->pdata->pm_caps; 2858 2859 if (drv_data) 2860 mmc->caps |= drv_data->common_caps; 2861 2862 if (host->dev->of_node) { 2863 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2864 if (ctrl_id < 0) 2865 ctrl_id = 0; 2866 } else { 2867 ctrl_id = to_platform_device(host->dev)->id; 2868 } 2869 2870 if (drv_data && drv_data->caps) { 2871 if (ctrl_id >= drv_data->num_caps) { 2872 dev_err(host->dev, "invalid controller id %d\n", 2873 ctrl_id); 2874 return -EINVAL; 2875 } 2876 mmc->caps |= drv_data->caps[ctrl_id]; 2877 } 2878 2879 if (host->pdata->caps2) 2880 mmc->caps2 = host->pdata->caps2; 2881 2882 mmc->f_min = DW_MCI_FREQ_MIN; 2883 if (!mmc->f_max) 2884 mmc->f_max = DW_MCI_FREQ_MAX; 2885 2886 /* Process SDIO IRQs through the sdio_irq_work. */ 2887 if (mmc->caps & MMC_CAP_SDIO_IRQ) 2888 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2889 2890 return 0; 2891 } 2892
2893 static int dw_mci_init_slot(struct dw_mci *host) 2894 { 2895 struct mmc_host *mmc; 2896 struct dw_mci_slot *slot; 2897 int ret; 2898 2899 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2900 if (!mmc) 2901 return -ENOMEM; 2902 2903 slot = mmc_priv(mmc); 2904 slot->id = 0; 2905 slot->sdio_id = host->sdio_id0 + slot->id; 2906 slot->mmc = mmc; 2907 slot->host = host; 2908 host->slot = slot; 2909 2910 mmc->ops = &dw_mci_ops; 2911 2912 /* if there are external regulators, get them */ 2913 ret = mmc_regulator_get_supply(mmc); 2914 if (ret) 2915 goto err_host_allocated; 2916 2917 if (!mmc->ocr_avail) 2918 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2919 2920 ret = mmc_of_parse(mmc); 2921 if (ret) 2922 goto err_host_allocated; 2923 2924 ret = dw_mci_init_slot_caps(slot); 2925 if (ret) 2926 goto err_host_allocated; 2927
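/*
 * Editorial worked example (a sketch; the exact numbers depend on
 * PAGE_SIZE and the descriptor layout chosen by the IDMAC init code):
 * with 4 KiB pages and 16-byte 32-bit IDMAC descriptors, host->ring_size
 * would be 4096 / 16 = 256, so the IDMAC branch below yields
 * max_segs = 256, max_req_size = 0x1000 * 256 = 1 MiB and
 * max_blk_count = 1 MiB / 512 = 2048.
 */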
2928 /* Useful defaults if platform data is unset. */ 2929 if (host->use_dma == TRANS_MODE_IDMAC) { 2930 mmc->max_segs = host->ring_size; 2931 mmc->max_blk_size = 65535; 2932 mmc->max_seg_size = 0x1000; 2933 mmc->max_req_size = mmc->max_seg_size * host->ring_size; 2934 mmc->max_blk_count = mmc->max_req_size / 512; 2935 } else if (host->use_dma == TRANS_MODE_EDMAC) { 2936 mmc->max_segs = 64; 2937 mmc->max_blk_size = 65535; 2938 mmc->max_blk_count = 65535; 2939 mmc->max_req_size = 2940 mmc->max_blk_size * mmc->max_blk_count; 2941 mmc->max_seg_size = mmc->max_req_size; 2942 } else { 2943 /* TRANS_MODE_PIO */ 2944 mmc->max_segs = 64; 2945 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ 2946 mmc->max_blk_count = 512; 2947 mmc->max_req_size = mmc->max_blk_size * 2948 mmc->max_blk_count; 2949 mmc->max_seg_size = mmc->max_req_size; 2950 } 2951 2952 dw_mci_get_cd(mmc); 2953 2954 ret = mmc_add_host(mmc); 2955 if (ret) 2956 goto err_host_allocated; 2957 2958 #if defined(CONFIG_DEBUG_FS) 2959 dw_mci_init_debugfs(slot); 2960 #endif 2961 2962 return 0; 2963 2964 err_host_allocated: 2965 mmc_free_host(mmc); 2966 return ret; 2967 } 2968
2969 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot) 2970 { 2971 /* Debugfs stuff is cleaned up by mmc core */ 2972 mmc_remove_host(slot->mmc); 2973 slot->host->slot = NULL; 2974 mmc_free_host(slot->mmc); 2975 } 2976
2977 static void dw_mci_init_dma(struct dw_mci *host) 2978 { 2979 int addr_config; 2980 struct device *dev = host->dev; 2981 2982 /* 2983 * Check the transfer mode from HCON[17:16] to clarify the 2984 * ambiguous description in the dw_mmc databook: 2985 * 2b'00: No DMA Interface -> Actually means using Internal DMA block 2986 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block 2987 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block 2988 * 2b'11: Non DW DMA Interface -> PIO only 2989 * Compared to the DesignWare DMA Interface, the Generic DMA Interface has a 2990 * simpler request/acknowledge handshake mechanism and both of them 2991 * are regarded as external DMA masters by dw_mmc.
2992 */ 2993 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 2994 if (host->use_dma == DMA_INTERFACE_IDMA) { 2995 host->use_dma = TRANS_MODE_IDMAC; 2996 } else if (host->use_dma == DMA_INTERFACE_DWDMA || 2997 host->use_dma == DMA_INTERFACE_GDMA) { 2998 host->use_dma = TRANS_MODE_EDMAC; 2999 } else { 3000 goto no_dma; 3001 } 3002 3003 /* Determine which DMA interface to use */ 3004 if (host->use_dma == TRANS_MODE_IDMAC) { 3005 /* 3006 * Check ADDR_CONFIG bit in HCON to find 3007 * IDMAC address bus width 3008 */ 3009 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 3010 3011 if (addr_config == 1) { 3012 /* host supports IDMAC in 64-bit address mode */ 3013 host->dma_64bit_address = 1; 3014 dev_info(host->dev, 3015 "IDMAC supports 64-bit address mode.\n"); 3016 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 3017 dma_set_coherent_mask(host->dev, 3018 DMA_BIT_MASK(64)); 3019 } else { 3020 /* host supports IDMAC in 32-bit address mode */ 3021 host->dma_64bit_address = 0; 3022 dev_info(host->dev, 3023 "IDMAC supports 32-bit address mode.\n"); 3024 } 3025 3026 /* Alloc memory for sg translation */ 3027 host->sg_cpu = dmam_alloc_coherent(host->dev, 3028 DESC_RING_BUF_SZ, 3029 &host->sg_dma, GFP_KERNEL); 3030 if (!host->sg_cpu) { 3031 dev_err(host->dev, 3032 "%s: could not alloc DMA memory\n", 3033 __func__); 3034 goto no_dma; 3035 } 3036 3037 host->dma_ops = &dw_mci_idmac_ops; 3038 dev_info(host->dev, "Using internal DMA controller.\n"); 3039 } else { 3040 /* TRANS_MODE_EDMAC: check dma bindings again */ 3041 if ((device_property_read_string_array(dev, "dma-names", 3042 NULL, 0) < 0) || 3043 !device_property_present(dev, "dmas")) { 3044 goto no_dma; 3045 } 3046 host->dma_ops = &dw_mci_edmac_ops; 3047 dev_info(host->dev, "Using external DMA controller.\n"); 3048 } 3049 3050 if (host->dma_ops->init && host->dma_ops->start && 3051 host->dma_ops->stop && host->dma_ops->cleanup) { 3052 if (host->dma_ops->init(host)) { 3053 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 3054 __func__); 3055 goto no_dma; 3056 } 3057 } else { 3058 dev_err(host->dev, "DMA initialization not found.\n"); 3059 goto no_dma; 3060 } 3061 3062 return; 3063 3064 no_dma: 3065 dev_info(host->dev, "Using PIO mode.\n"); 3066 host->use_dma = TRANS_MODE_PIO; 3067 } 3068
3069 static void dw_mci_cmd11_timer(struct timer_list *t) 3070 { 3071 struct dw_mci *host = from_timer(host, t, cmd11_timer); 3072 3073 if (host->state != STATE_SENDING_CMD11) { 3074 dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 3075 return; 3076 } 3077 3078 host->cmd_status = SDMMC_INT_RTO; 3079 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 3080 tasklet_schedule(&host->tasklet); 3081 } 3082
3083 static void dw_mci_cto_timer(struct timer_list *t) 3084 { 3085 struct dw_mci *host = from_timer(host, t, cto_timer); 3086 unsigned long irqflags; 3087 u32 pending; 3088 3089 spin_lock_irqsave(&host->irq_lock, irqflags); 3090 3091 /* 3092 * If somehow we have very bad interrupt latency it's remotely possible 3093 * that the timer could fire while the interrupt is still pending or 3094 * while the interrupt is midway through running. Let's be paranoid 3095 * and detect those two cases. Note that this paranoia is somewhat 3096 * justified because in this function we don't actually cancel the 3097 * pending command in the controller--we just assume it will never come.
3098 */ 3099 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 3100 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { 3101 /* The interrupt should fire; no need to act but we can warn */ 3102 dev_warn(host->dev, "Unexpected interrupt latency\n"); 3103 goto exit; 3104 } 3105 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { 3106 /* Presumably interrupt handler couldn't delete the timer */ 3107 dev_warn(host->dev, "CTO timeout when already completed\n"); 3108 goto exit; 3109 } 3110 3111 /* 3112 * Continued paranoia to make sure we're in the state we expect. 3113 * This paranoia isn't really justified but it seems good to be safe. 3114 */ 3115 switch (host->state) { 3116 case STATE_SENDING_CMD11: 3117 case STATE_SENDING_CMD: 3118 case STATE_SENDING_STOP: 3119 /* 3120 * If the CMD_DONE interrupt does NOT arrive while in a 3121 * command-sending state, notify the driver to terminate the 3122 * current transfer and report a command timeout to the core. 3123 */ 3124 host->cmd_status = SDMMC_INT_RTO; 3125 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 3126 tasklet_schedule(&host->tasklet); 3127 break; 3128 default: 3129 dev_warn(host->dev, "Unexpected command timeout, state %d\n", 3130 host->state); 3131 break; 3132 } 3133 3134 exit: 3135 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3136 } 3137
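/*
 * Editorial worked example for the budget armed by dw_mci_set_drto()
 * above (a sketch; the values are illustrative): with TMOUT[31:8] =
 * 0xffffff data-timeout clocks, CLKDIV = 1 (card clock = bus_hz / 2) and
 * bus_hz = 100 MHz, drto_ms = DIV_ROUND_UP(1000 * 16777215 * 2, 100000000)
 * = 336 ms, plus the 10 ms of slack, before the dto_timer below may fire.
 */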
3138 static void dw_mci_dto_timer(struct timer_list *t) 3139 { 3140 struct dw_mci *host = from_timer(host, t, dto_timer); 3141 unsigned long irqflags; 3142 u32 pending; 3143 3144 spin_lock_irqsave(&host->irq_lock, irqflags); 3145 3146 /* 3147 * The DTO timer is much longer than the CTO timer, so it's even less 3148 * likely that we'll hit these cases, but it pays to be paranoid. 3149 */ 3150 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 3151 if (pending & SDMMC_INT_DATA_OVER) { 3152 /* The interrupt should fire; no need to act but we can warn */ 3153 dev_warn(host->dev, "Unexpected data interrupt latency\n"); 3154 goto exit; 3155 } 3156 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { 3157 /* Presumably interrupt handler couldn't delete the timer */ 3158 dev_warn(host->dev, "DTO timeout when already completed\n"); 3159 goto exit; 3160 } 3161 3162 /* 3163 * Continued paranoia to make sure we're in the state we expect. 3164 * This paranoia isn't really justified but it seems good to be safe. 3165 */ 3166 switch (host->state) { 3167 case STATE_SENDING_DATA: 3168 case STATE_DATA_BUSY: 3169 /* 3170 * If the DTO interrupt does NOT arrive while in a 3171 * data-transfer state, notify the driver to terminate the 3172 * current transfer and report a data timeout to the core. 3173 */ 3174 host->data_status = SDMMC_INT_DRTO; 3175 set_bit(EVENT_DATA_ERROR, &host->pending_events); 3176 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 3177 tasklet_schedule(&host->tasklet); 3178 break; 3179 default: 3180 dev_warn(host->dev, "Unexpected data timeout, state %d\n", 3181 host->state); 3182 break; 3183 } 3184 3185 exit: 3186 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3187 } 3188
3189 #ifdef CONFIG_OF 3190 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3191 { 3192 struct dw_mci_board *pdata; 3193 struct device *dev = host->dev; 3194 const struct dw_mci_drv_data *drv_data = host->drv_data; 3195 int ret; 3196 u32 clock_frequency; 3197 3198 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 3199 if (!pdata) 3200 return ERR_PTR(-ENOMEM); 3201 3202 /* find the reset controller if one exists */ 3203 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset"); 3204 if (IS_ERR(pdata->rstc)) 3205 return ERR_CAST(pdata->rstc); 3206 3207 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) 3208 dev_info(dev, 3209 "fifo-depth property not found, using value of FIFOTH register as default\n"); 3210 3211 device_property_read_u32(dev, "card-detect-delay", 3212 &pdata->detect_delay_ms); 3213 3214 device_property_read_u32(dev, "data-addr", &host->data_addr_override); 3215 3216 if (device_property_present(dev, "fifo-watermark-aligned")) 3217 host->wm_aligned = true; 3218 3219 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency)) 3220 pdata->bus_hz = clock_frequency; 3221 3222 if (drv_data && drv_data->parse_dt) { 3223 ret = drv_data->parse_dt(host); 3224 if (ret) 3225 return ERR_PTR(ret); 3226 } 3227 3228 return pdata; 3229 } 3230 3231 #else /* CONFIG_OF */ 3232 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3233 { 3234 return ERR_PTR(-EINVAL); 3235 } 3236 #endif /* CONFIG_OF */ 3237
3238 static void dw_mci_enable_cd(struct dw_mci *host) 3239 { 3240 unsigned long irqflags; 3241 u32 temp; 3242 3243 /* 3244 * No need for the CD interrupt if the slot has a working CD GPIO 3245 * or if broken card detection forces the core to poll.
3246 */ 3247 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL) 3248 return; 3249 3250 if (mmc_gpio_get_cd(host->slot->mmc) < 0) { 3251 spin_lock_irqsave(&host->irq_lock, irqflags); 3252 temp = mci_readl(host, INTMASK); 3253 temp |= SDMMC_INT_CD; 3254 mci_writel(host, INTMASK, temp); 3255 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3256 } 3257 } 3258 3259 int dw_mci_probe(struct dw_mci *host) 3260 { 3261 const struct dw_mci_drv_data *drv_data = host->drv_data; 3262 int width, i, ret = 0; 3263 u32 fifo_size; 3264 3265 if (!host->pdata) { 3266 host->pdata = dw_mci_parse_dt(host); 3267 if (IS_ERR(host->pdata)) 3268 return dev_err_probe(host->dev, PTR_ERR(host->pdata), 3269 "platform data not available\n"); 3270 } 3271 3272 host->biu_clk = devm_clk_get(host->dev, "biu"); 3273 if (IS_ERR(host->biu_clk)) { 3274 dev_dbg(host->dev, "biu clock not available\n"); 3275 } else { 3276 ret = clk_prepare_enable(host->biu_clk); 3277 if (ret) { 3278 dev_err(host->dev, "failed to enable biu clock\n"); 3279 return ret; 3280 } 3281 } 3282 3283 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3284 if (IS_ERR(host->ciu_clk)) { 3285 dev_dbg(host->dev, "ciu clock not available\n"); 3286 host->bus_hz = host->pdata->bus_hz; 3287 } else { 3288 ret = clk_prepare_enable(host->ciu_clk); 3289 if (ret) { 3290 dev_err(host->dev, "failed to enable ciu clock\n"); 3291 goto err_clk_biu; 3292 } 3293 3294 if (host->pdata->bus_hz) { 3295 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 3296 if (ret) 3297 dev_warn(host->dev, 3298 "Unable to set bus rate to %uHz\n", 3299 host->pdata->bus_hz); 3300 } 3301 host->bus_hz = clk_get_rate(host->ciu_clk); 3302 } 3303 3304 if (!host->bus_hz) { 3305 dev_err(host->dev, 3306 "Platform data must supply bus speed\n"); 3307 ret = -ENODEV; 3308 goto err_clk_ciu; 3309 } 3310 3311 if (host->pdata->rstc) { 3312 reset_control_assert(host->pdata->rstc); 3313 usleep_range(10, 50); 3314 reset_control_deassert(host->pdata->rstc); 3315 } 3316 3317 if (drv_data && drv_data->init) { 3318 ret = drv_data->init(host); 3319 if (ret) { 3320 dev_err(host->dev, 3321 "implementation specific init failed\n"); 3322 goto err_clk_ciu; 3323 } 3324 } 3325 3326 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); 3327 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); 3328 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); 3329 3330 spin_lock_init(&host->lock); 3331 spin_lock_init(&host->irq_lock); 3332 INIT_LIST_HEAD(&host->queue); 3333 3334 dw_mci_init_fault(host); 3335 3336 /* 3337 * Get the host data width - this assumes that HCON has been set with 3338 * the correct values. 
3339 */ 3340 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3341 if (!i) { 3342 host->push_data = dw_mci_push_data16; 3343 host->pull_data = dw_mci_pull_data16; 3344 width = 16; 3345 host->data_shift = 1; 3346 } else if (i == 2) { 3347 host->push_data = dw_mci_push_data64; 3348 host->pull_data = dw_mci_pull_data64; 3349 width = 64; 3350 host->data_shift = 3; 3351 } else { 3352 /* Check for a reserved value, and warn if it is */ 3353 WARN((i != 1), 3354 "HCON reports a reserved host data width!\n" 3355 "Defaulting to 32-bit access.\n"); 3356 host->push_data = dw_mci_push_data32; 3357 host->pull_data = dw_mci_pull_data32; 3358 width = 32; 3359 host->data_shift = 2; 3360 } 3361 3362 /* Reset all blocks */ 3363 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3364 ret = -ENODEV; 3365 goto err_clk_ciu; 3366 } 3367 3368 host->dma_ops = host->pdata->dma_ops; 3369 dw_mci_init_dma(host); 3370 3371 /* Clear the interrupts for the host controller */ 3372 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3373 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3374 3375 /* Put in max timeout */ 3376 mci_writel(host, TMOUT, 0xFFFFFFFF); 3377 3378 /* 3379 * FIFO threshold settings: RxMark = fifo_size / 2 - 1, 3380 * TxMark = fifo_size / 2, DMA Size = 8 3381 */ 3382 if (!host->pdata->fifo_depth) { 3383 /* 3384 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3385 * have been overwritten by the bootloader, just like we're 3386 * about to do, so if you know the value for your hardware, you 3387 * should put it in the platform data. 3388 */ 3389 fifo_size = mci_readl(host, FIFOTH); 3390 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3391 } else { 3392 fifo_size = host->pdata->fifo_depth; 3393 } 3394 host->fifo_depth = fifo_size; 3395 host->fifoth_val = 3396 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3397 mci_writel(host, FIFOTH, host->fifoth_val); 3398 3399 /* disable clock to CIU */ 3400 mci_writel(host, CLKENA, 0); 3401 mci_writel(host, CLKSRC, 0); 3402
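/*
 * Editorial worked example (a sketch): for a 32-word FIFO the FIFOTH
 * setup above computes SDMMC_SET_FIFOTH(0x2, 15, 16), i.e. receive-ready
 * fires once more than RX_WMark = 15 words are queued, transmit-ready
 * once the FIFO level falls to TX_WMark = 16 or below, and MSIZE = 0x2
 * selects DMA bursts of 8 transfers per the databook's MSIZE encoding,
 * matching the "DMA Size = 8" note above.
 */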
3403 /* 3404 * In the 2.40a spec the data offset changed, so check the version 3405 * ID and set the data offset for the DATA register accordingly. 3406 */ 3407 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 3408 dev_info(host->dev, "Version ID is %04x\n", host->verid); 3409 3410 if (host->data_addr_override) 3411 host->fifo_reg = host->regs + host->data_addr_override; 3412 else if (host->verid < DW_MMC_240A) 3413 host->fifo_reg = host->regs + DATA_OFFSET; 3414 else 3415 host->fifo_reg = host->regs + DATA_240A_OFFSET; 3416 3417 tasklet_setup(&host->tasklet, dw_mci_tasklet_func); 3418 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3419 host->irq_flags, "dw-mci", host); 3420 if (ret) 3421 goto err_dmaunmap; 3422 3423 /* 3424 * Enable interrupts for command done, data over, data empty, receive 3425 * ready, and errors such as transmit/receive timeout and CRC error 3426 */ 3427 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3428 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3429 DW_MCI_ERROR_FLAGS); 3430 /* Enable mci interrupt */ 3431 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3432 3433 dev_info(host->dev, 3434 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 3435 host->irq, width, fifo_size); 3436 3437 /* We need at least one slot to succeed */ 3438 ret = dw_mci_init_slot(host); 3439 if (ret) { 3440 dev_dbg(host->dev, "slot %d init failed\n", i); 3441 goto err_dmaunmap; 3442 } 3443 3444 /* Now that slots are all setup, we can enable card detect */ 3445 dw_mci_enable_cd(host); 3446 3447 return 0; 3448 3449 err_dmaunmap: 3450 if (host->use_dma && host->dma_ops->exit) 3451 host->dma_ops->exit(host); 3452 3453 reset_control_assert(host->pdata->rstc); 3454 3455 err_clk_ciu: 3456 clk_disable_unprepare(host->ciu_clk); 3457 3458 err_clk_biu: 3459 clk_disable_unprepare(host->biu_clk); 3460 3461 return ret; 3462 } 3463 EXPORT_SYMBOL(dw_mci_probe); 3464
3465 void dw_mci_remove(struct dw_mci *host) 3466 { 3467 dev_dbg(host->dev, "remove slot\n"); 3468 if (host->slot) 3469 dw_mci_cleanup_slot(host->slot); 3470 3471 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3472 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3473 3474 /* disable clock to CIU */ 3475 mci_writel(host, CLKENA, 0); 3476 mci_writel(host, CLKSRC, 0); 3477 3478 if (host->use_dma && host->dma_ops->exit) 3479 host->dma_ops->exit(host); 3480 3481 reset_control_assert(host->pdata->rstc); 3482 3483 clk_disable_unprepare(host->ciu_clk); 3484 clk_disable_unprepare(host->biu_clk); 3485 } 3486 EXPORT_SYMBOL(dw_mci_remove); 3487 3488 3489
3490 #ifdef CONFIG_PM 3491 int dw_mci_runtime_suspend(struct device *dev) 3492 { 3493 struct dw_mci *host = dev_get_drvdata(dev); 3494 3495 if (host->use_dma && host->dma_ops->exit) 3496 host->dma_ops->exit(host); 3497 3498 clk_disable_unprepare(host->ciu_clk); 3499 3500 if (host->slot && 3501 (mmc_can_gpio_cd(host->slot->mmc) || 3502 !mmc_card_is_removable(host->slot->mmc))) 3503 clk_disable_unprepare(host->biu_clk); 3504 3505 return 0; 3506 } 3507 EXPORT_SYMBOL(dw_mci_runtime_suspend); 3508
3509 int dw_mci_runtime_resume(struct device *dev) 3510 { 3511 int ret = 0; 3512 struct dw_mci *host = dev_get_drvdata(dev); 3513 3514 if (host->slot && 3515 (mmc_can_gpio_cd(host->slot->mmc) || 3516 !mmc_card_is_removable(host->slot->mmc))) { 3517 ret = clk_prepare_enable(host->biu_clk); 3518 if (ret) 3519 return ret; 3520 } 3521 3522 ret = clk_prepare_enable(host->ciu_clk); 3523 if (ret) 3524 goto err; 3525 3526 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3527 clk_disable_unprepare(host->ciu_clk); 3528 ret = -ENODEV; 3529 goto err; 3530 } 3531 3532 if (host->use_dma && host->dma_ops->init
3533 host->dma_ops->init(host); 3534 3535 /* 3536 * Restore the initial value of the FIFOTH register 3537 * and invalidate prev_blksz with zero 3538 */ 3539 mci_writel(host, FIFOTH, host->fifoth_val); 3540 host->prev_blksz = 0; 3541 3542 /* Put in max timeout */ 3543 mci_writel(host, TMOUT, 0xFFFFFFFF); 3544 3545 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3546 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3547 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3548 DW_MCI_ERROR_FLAGS); 3549 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3550 3551 3552 if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER) 3553 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios); 3554 3555 /* Force setup bus to guarantee available clock output */ 3556 dw_mci_setup_bus(host->slot, true); 3557 3558 /* Re-enable SDIO interrupts. */ 3559 if (sdio_irq_claimed(host->slot->mmc)) 3560 __dw_mci_enable_sdio_irq(host->slot, 1); 3561 3562 /* Now that slots are all setup, we can enable card detect */ 3563 dw_mci_enable_cd(host); 3564 3565 return 0; 3566 3567 err: 3568 if (host->slot && 3569 (mmc_can_gpio_cd(host->slot->mmc) || 3570 !mmc_card_is_removable(host->slot->mmc))) 3571 clk_disable_unprepare(host->biu_clk); 3572 3573 return ret; 3574 } 3575 EXPORT_SYMBOL(dw_mci_runtime_resume); 3576 #endif /* CONFIG_PM */ 3577
3578 static int __init dw_mci_init(void) 3579 { 3580 pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n"); 3581 return 0; 3582 } 3583 3584 static void __exit dw_mci_exit(void) 3585 { 3586 } 3587 3588 module_init(dw_mci_init); 3589 module_exit(dw_mci_exit); 3590 3591 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3592 MODULE_AUTHOR("NXP Semiconductor VietNam"); 3593 MODULE_AUTHOR("Imagination Technologies Ltd"); 3594 MODULE_LICENSE("GPL v2"); 3595
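/*
 * Editorial usage sketch (not part of this file; the pm_ops name below is
 * hypothetical): platform glue such as dw_mmc-pltfm typically wires the
 * runtime PM helpers exported above into its dev_pm_ops, e.g.:
 *
 *	static const struct dev_pm_ops dw_mci_example_pmops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
 *				   dw_mci_runtime_resume, NULL)
 *	};
 */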