1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Synopsys DesignWare Multimedia Card Interface driver 4 * (Based on NXP driver for lpc 31xx) 5 * 6 * Copyright (C) 2009 NXP Semiconductors 7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 8 */ 9 10 #include <linux/blkdev.h> 11 #include <linux/clk.h> 12 #include <linux/debugfs.h> 13 #include <linux/device.h> 14 #include <linux/dma-mapping.h> 15 #include <linux/err.h> 16 #include <linux/init.h> 17 #include <linux/interrupt.h> 18 #include <linux/iopoll.h> 19 #include <linux/ioport.h> 20 #include <linux/ktime.h> 21 #include <linux/module.h> 22 #include <linux/platform_device.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/prandom.h> 25 #include <linux/seq_file.h> 26 #include <linux/slab.h> 27 #include <linux/stat.h> 28 #include <linux/delay.h> 29 #include <linux/irq.h> 30 #include <linux/mmc/card.h> 31 #include <linux/mmc/host.h> 32 #include <linux/mmc/mmc.h> 33 #include <linux/mmc/sd.h> 34 #include <linux/mmc/sdio.h> 35 #include <linux/bitops.h> 36 #include <linux/regulator/consumer.h> 37 #include <linux/of.h> 38 #include <linux/of_gpio.h> 39 #include <linux/mmc/slot-gpio.h> 40 41 #include "dw_mmc.h" 42 43 /* Common flag combinations */ 44 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 45 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 46 SDMMC_INT_EBE | SDMMC_INT_HLE) 47 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 48 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) 49 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 50 DW_MCI_CMD_ERROR_FLAGS) 51 #define DW_MCI_SEND_STATUS 1 52 #define DW_MCI_RECV_STATUS 2 53 #define DW_MCI_DMA_THRESHOLD 16 54 55 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 56 #define DW_MCI_FREQ_MIN 100000 /* unit: HZ */ 57 58 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 59 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 60 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 61 SDMMC_IDMAC_INT_TI) 62 63 #define DESC_RING_BUF_SZ PAGE_SIZE 64 65 struct idmac_desc_64addr { 66 u32 des0; /* Control Descriptor */ 67 #define IDMAC_OWN_CLR64(x) \ 68 !((x) & cpu_to_le32(IDMAC_DES0_OWN)) 69 70 u32 des1; /* Reserved */ 71 72 u32 des2; /*Buffer sizes */ 73 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ 74 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ 75 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) 76 77 u32 des3; /* Reserved */ 78 79 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/ 80 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ 81 82 u32 des6; /* Lower 32-bits of Next Descriptor Address */ 83 u32 des7; /* Upper 32-bits of Next Descriptor Address */ 84 }; 85 86 struct idmac_desc { 87 __le32 des0; /* Control Descriptor */ 88 #define IDMAC_DES0_DIC BIT(1) 89 #define IDMAC_DES0_LD BIT(2) 90 #define IDMAC_DES0_FD BIT(3) 91 #define IDMAC_DES0_CH BIT(4) 92 #define IDMAC_DES0_ER BIT(5) 93 #define IDMAC_DES0_CES BIT(30) 94 #define IDMAC_DES0_OWN BIT(31) 95 96 __le32 des1; /* Buffer sizes */ 97 #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 98 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) 99 100 __le32 des2; /* buffer 1 physical address */ 101 102 __le32 des3; /* buffer 2 physical address */ 103 }; 104 105 /* Each descriptor can transfer up to 4KB of data in chained mode */ 106 #define DW_MCI_DESC_DATA_LENGTH 0x1000 107 108 #if defined(CONFIG_DEBUG_FS) 109 static int dw_mci_req_show(struct seq_file *s, void *v) 110 { 111 struct dw_mci_slot *slot = s->private; 112 struct mmc_request *mrq; 113 struct mmc_command *cmd; 114 struct mmc_command *stop; 115 struct mmc_data *data; 116 117 /* Make sure we get a consistent snapshot */ 118 spin_lock_bh(&slot->host->lock); 119 mrq = slot->mrq; 120 121 if (mrq) { 122 cmd = mrq->cmd; 123 data = mrq->data; 124 stop = mrq->stop; 125 126 if (cmd) 127 seq_printf(s, 128 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 129 cmd->opcode, cmd->arg, cmd->flags, 130 cmd->resp[0], cmd->resp[1], cmd->resp[2], 131 cmd->resp[2], cmd->error); 132 if (data) 133 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 134 data->bytes_xfered, data->blocks, 135 data->blksz, data->flags, data->error); 136 if (stop) 137 seq_printf(s, 138 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 139 stop->opcode, stop->arg, stop->flags, 140 stop->resp[0], stop->resp[1], stop->resp[2], 141 stop->resp[2], stop->error); 142 } 143 144 spin_unlock_bh(&slot->host->lock); 145 146 return 0; 147 } 148 DEFINE_SHOW_ATTRIBUTE(dw_mci_req); 149 150 static int dw_mci_regs_show(struct seq_file *s, void *v) 151 { 152 struct dw_mci *host = s->private; 153 154 pm_runtime_get_sync(host->dev); 155 156 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 157 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 158 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 159 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 160 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 161 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 162 163 pm_runtime_put_autosuspend(host->dev); 164 165 return 0; 166 } 167 DEFINE_SHOW_ATTRIBUTE(dw_mci_regs); 168 169 static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 170 { 171 struct mmc_host *mmc = slot->mmc; 172 struct dw_mci *host = slot->host; 173 struct dentry *root; 174 175 root = mmc->debugfs_root; 176 if (!root) 177 return; 178 179 debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops); 180 debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops); 181 debugfs_create_u32("state", S_IRUSR, root, &host->state); 182 debugfs_create_xul("pending_events", S_IRUSR, root, 183 &host->pending_events); 184 debugfs_create_xul("completed_events", S_IRUSR, root, 185 &host->completed_events); 186 #ifdef CONFIG_FAULT_INJECTION 187 fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc); 188 #endif 189 } 190 #endif /* defined(CONFIG_DEBUG_FS) */ 191 192 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 193 { 194 u32 ctrl; 195 196 ctrl = mci_readl(host, CTRL); 197 ctrl |= reset; 198 mci_writel(host, CTRL, ctrl); 199 200 /* wait till resets clear */ 201 if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl, 202 !(ctrl & reset), 203 1, 500 * USEC_PER_MSEC)) { 204 dev_err(host->dev, 205 "Timeout resetting block (ctrl reset %#x)\n", 206 ctrl & reset); 207 return false; 208 } 209 210 return true; 211 } 212 213 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 214 { 215 u32 status; 216 217 /* 218 * Databook says that before issuing a new data transfer command 219 * we need to check to see if the card is busy. Data transfer commands 220 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 221 * 222 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 223 * expected. 224 */ 225 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 226 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 227 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 228 status, 229 !(status & SDMMC_STATUS_BUSY), 230 10, 500 * USEC_PER_MSEC)) 231 dev_err(host->dev, "Busy; trying anyway\n"); 232 } 233 } 234 235 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 236 { 237 struct dw_mci *host = slot->host; 238 unsigned int cmd_status = 0; 239 240 mci_writel(host, CMDARG, arg); 241 wmb(); /* drain writebuffer */ 242 dw_mci_wait_while_busy(host, cmd); 243 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 244 245 if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status, 246 !(cmd_status & SDMMC_CMD_START), 247 1, 500 * USEC_PER_MSEC)) 248 dev_err(&slot->mmc->class_dev, 249 "Timeout sending command (cmd %#x arg %#x status %#x)\n", 250 cmd, arg, cmd_status); 251 } 252 253 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 254 { 255 struct dw_mci_slot *slot = mmc_priv(mmc); 256 struct dw_mci *host = slot->host; 257 u32 cmdr; 258 259 cmd->error = -EINPROGRESS; 260 cmdr = cmd->opcode; 261 262 if (cmd->opcode == MMC_STOP_TRANSMISSION || 263 cmd->opcode == MMC_GO_IDLE_STATE || 264 cmd->opcode == MMC_GO_INACTIVE_STATE || 265 (cmd->opcode == SD_IO_RW_DIRECT && 266 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 267 cmdr |= SDMMC_CMD_STOP; 268 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 269 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 270 271 if (cmd->opcode == SD_SWITCH_VOLTAGE) { 272 u32 clk_en_a; 273 274 /* Special bit makes CMD11 not die */ 275 cmdr |= SDMMC_CMD_VOLT_SWITCH; 276 277 /* Change state to continue to handle CMD11 weirdness */ 278 WARN_ON(slot->host->state != STATE_SENDING_CMD); 279 slot->host->state = STATE_SENDING_CMD11; 280 281 /* 282 * We need to disable low power mode (automatic clock stop) 283 * while doing voltage switch so we don't confuse the card, 284 * since stopping the clock is a specific part of the UHS 285 * voltage change dance. 286 * 287 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be 288 * unconditionally turned back on in dw_mci_setup_bus() if it's 289 * ever called with a non-zero clock. That shouldn't happen 290 * until the voltage change is all done. 291 */ 292 clk_en_a = mci_readl(host, CLKENA); 293 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); 294 mci_writel(host, CLKENA, clk_en_a); 295 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 296 SDMMC_CMD_PRV_DAT_WAIT, 0); 297 } 298 299 if (cmd->flags & MMC_RSP_PRESENT) { 300 /* We expect a response, so set this bit */ 301 cmdr |= SDMMC_CMD_RESP_EXP; 302 if (cmd->flags & MMC_RSP_136) 303 cmdr |= SDMMC_CMD_RESP_LONG; 304 } 305 306 if (cmd->flags & MMC_RSP_CRC) 307 cmdr |= SDMMC_CMD_RESP_CRC; 308 309 if (cmd->data) { 310 cmdr |= SDMMC_CMD_DAT_EXP; 311 if (cmd->data->flags & MMC_DATA_WRITE) 312 cmdr |= SDMMC_CMD_DAT_WR; 313 } 314 315 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) 316 cmdr |= SDMMC_CMD_USE_HOLD_REG; 317 318 return cmdr; 319 } 320 321 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) 322 { 323 struct mmc_command *stop; 324 u32 cmdr; 325 326 if (!cmd->data) 327 return 0; 328 329 stop = &host->stop_abort; 330 cmdr = cmd->opcode; 331 memset(stop, 0, sizeof(struct mmc_command)); 332 333 if (cmdr == MMC_READ_SINGLE_BLOCK || 334 cmdr == MMC_READ_MULTIPLE_BLOCK || 335 cmdr == MMC_WRITE_BLOCK || 336 cmdr == MMC_WRITE_MULTIPLE_BLOCK || 337 cmdr == MMC_SEND_TUNING_BLOCK || 338 cmdr == MMC_SEND_TUNING_BLOCK_HS200) { 339 stop->opcode = MMC_STOP_TRANSMISSION; 340 stop->arg = 0; 341 stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 342 } else if (cmdr == SD_IO_RW_EXTENDED) { 343 stop->opcode = SD_IO_RW_DIRECT; 344 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | 345 ((cmd->arg >> 28) & 0x7); 346 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; 347 } else { 348 return 0; 349 } 350 351 cmdr = stop->opcode | SDMMC_CMD_STOP | 352 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; 353 354 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags)) 355 cmdr |= SDMMC_CMD_USE_HOLD_REG; 356 357 return cmdr; 358 } 359 360 static inline void dw_mci_set_cto(struct dw_mci *host) 361 { 362 unsigned int cto_clks; 363 unsigned int cto_div; 364 unsigned int cto_ms; 365 unsigned long irqflags; 366 367 cto_clks = mci_readl(host, TMOUT) & 0xff; 368 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 369 if (cto_div == 0) 370 cto_div = 1; 371 372 cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, 373 host->bus_hz); 374 375 /* add a bit spare time */ 376 cto_ms += 10; 377 378 /* 379 * The durations we're working with are fairly short so we have to be 380 * extra careful about synchronization here. Specifically in hardware a 381 * command timeout is _at most_ 5.1 ms, so that means we expect an 382 * interrupt (either command done or timeout) to come rather quickly 383 * after the mci_writel. ...but just in case we have a long interrupt 384 * latency let's add a bit of paranoia. 385 * 386 * In general we'll assume that at least an interrupt will be asserted 387 * in hardware by the time the cto_timer runs. ...and if it hasn't 388 * been asserted in hardware by that time then we'll assume it'll never 389 * come. 390 */ 391 spin_lock_irqsave(&host->irq_lock, irqflags); 392 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 393 mod_timer(&host->cto_timer, 394 jiffies + msecs_to_jiffies(cto_ms) + 1); 395 spin_unlock_irqrestore(&host->irq_lock, irqflags); 396 } 397 398 static void dw_mci_start_command(struct dw_mci *host, 399 struct mmc_command *cmd, u32 cmd_flags) 400 { 401 host->cmd = cmd; 402 dev_vdbg(host->dev, 403 "start command: ARGR=0x%08x CMDR=0x%08x\n", 404 cmd->arg, cmd_flags); 405 406 mci_writel(host, CMDARG, cmd->arg); 407 wmb(); /* drain writebuffer */ 408 dw_mci_wait_while_busy(host, cmd_flags); 409 410 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 411 412 /* response expected command only */ 413 if (cmd_flags & SDMMC_CMD_RESP_EXP) 414 dw_mci_set_cto(host); 415 } 416 417 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 418 { 419 struct mmc_command *stop = &host->stop_abort; 420 421 dw_mci_start_command(host, stop, host->stop_cmdr); 422 } 423 424 /* DMA interface functions */ 425 static void dw_mci_stop_dma(struct dw_mci *host) 426 { 427 if (host->using_dma) { 428 host->dma_ops->stop(host); 429 host->dma_ops->cleanup(host); 430 } 431 432 /* Data transfer was stopped by the interrupt handler */ 433 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 434 } 435 436 static void dw_mci_dma_cleanup(struct dw_mci *host) 437 { 438 struct mmc_data *data = host->data; 439 440 if (data && data->host_cookie == COOKIE_MAPPED) { 441 dma_unmap_sg(host->dev, 442 data->sg, 443 data->sg_len, 444 mmc_get_dma_dir(data)); 445 data->host_cookie = COOKIE_UNMAPPED; 446 } 447 } 448 449 static void dw_mci_idmac_reset(struct dw_mci *host) 450 { 451 u32 bmod = mci_readl(host, BMOD); 452 /* Software reset of DMA */ 453 bmod |= SDMMC_IDMAC_SWRESET; 454 mci_writel(host, BMOD, bmod); 455 } 456 457 static void dw_mci_idmac_stop_dma(struct dw_mci *host) 458 { 459 u32 temp; 460 461 /* Disable and reset the IDMAC interface */ 462 temp = mci_readl(host, CTRL); 463 temp &= ~SDMMC_CTRL_USE_IDMAC; 464 temp |= SDMMC_CTRL_DMA_RESET; 465 mci_writel(host, CTRL, temp); 466 467 /* Stop the IDMAC running */ 468 temp = mci_readl(host, BMOD); 469 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 470 temp |= SDMMC_IDMAC_SWRESET; 471 mci_writel(host, BMOD, temp); 472 } 473 474 static void dw_mci_dmac_complete_dma(void *arg) 475 { 476 struct dw_mci *host = arg; 477 struct mmc_data *data = host->data; 478 479 dev_vdbg(host->dev, "DMA complete\n"); 480 481 if ((host->use_dma == TRANS_MODE_EDMAC) && 482 data && (data->flags & MMC_DATA_READ)) 483 /* Invalidate cache after read */ 484 dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc), 485 data->sg, 486 data->sg_len, 487 DMA_FROM_DEVICE); 488 489 host->dma_ops->cleanup(host); 490 491 /* 492 * If the card was removed, data will be NULL. No point in trying to 493 * send the stop command or waiting for NBUSY in this case. 494 */ 495 if (data) { 496 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 497 tasklet_schedule(&host->tasklet); 498 } 499 } 500 501 static int dw_mci_idmac_init(struct dw_mci *host) 502 { 503 int i; 504 505 if (host->dma_64bit_address == 1) { 506 struct idmac_desc_64addr *p; 507 /* Number of descriptors in the ring buffer */ 508 host->ring_size = 509 DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); 510 511 /* Forward link the descriptor list */ 512 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 513 i++, p++) { 514 p->des6 = (host->sg_dma + 515 (sizeof(struct idmac_desc_64addr) * 516 (i + 1))) & 0xffffffff; 517 518 p->des7 = (u64)(host->sg_dma + 519 (sizeof(struct idmac_desc_64addr) * 520 (i + 1))) >> 32; 521 /* Initialize reserved and buffer size fields to "0" */ 522 p->des0 = 0; 523 p->des1 = 0; 524 p->des2 = 0; 525 p->des3 = 0; 526 } 527 528 /* Set the last descriptor as the end-of-ring descriptor */ 529 p->des6 = host->sg_dma & 0xffffffff; 530 p->des7 = (u64)host->sg_dma >> 32; 531 p->des0 = IDMAC_DES0_ER; 532 533 } else { 534 struct idmac_desc *p; 535 /* Number of descriptors in the ring buffer */ 536 host->ring_size = 537 DESC_RING_BUF_SZ / sizeof(struct idmac_desc); 538 539 /* Forward link the descriptor list */ 540 for (i = 0, p = host->sg_cpu; 541 i < host->ring_size - 1; 542 i++, p++) { 543 p->des3 = cpu_to_le32(host->sg_dma + 544 (sizeof(struct idmac_desc) * (i + 1))); 545 p->des0 = 0; 546 p->des1 = 0; 547 } 548 549 /* Set the last descriptor as the end-of-ring descriptor */ 550 p->des3 = cpu_to_le32(host->sg_dma); 551 p->des0 = cpu_to_le32(IDMAC_DES0_ER); 552 } 553 554 dw_mci_idmac_reset(host); 555 556 if (host->dma_64bit_address == 1) { 557 /* Mask out interrupts - get Tx & Rx complete only */ 558 mci_writel(host, IDSTS64, IDMAC_INT_CLR); 559 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | 560 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 561 562 /* Set the descriptor base address */ 563 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); 564 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); 565 566 } else { 567 /* Mask out interrupts - get Tx & Rx complete only */ 568 mci_writel(host, IDSTS, IDMAC_INT_CLR); 569 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | 570 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 571 572 /* Set the descriptor base address */ 573 mci_writel(host, DBADDR, host->sg_dma); 574 } 575 576 return 0; 577 } 578 579 static inline int dw_mci_prepare_desc64(struct dw_mci *host, 580 struct mmc_data *data, 581 unsigned int sg_len) 582 { 583 unsigned int desc_len; 584 struct idmac_desc_64addr *desc_first, *desc_last, *desc; 585 u32 val; 586 int i; 587 588 desc_first = desc_last = desc = host->sg_cpu; 589 590 for (i = 0; i < sg_len; i++) { 591 unsigned int length = sg_dma_len(&data->sg[i]); 592 593 u64 mem_addr = sg_dma_address(&data->sg[i]); 594 595 for ( ; length ; desc++) { 596 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 597 length : DW_MCI_DESC_DATA_LENGTH; 598 599 length -= desc_len; 600 601 /* 602 * Wait for the former clear OWN bit operation 603 * of IDMAC to make sure that this descriptor 604 * isn't still owned by IDMAC as IDMAC's write 605 * ops and CPU's read ops are asynchronous. 606 */ 607 if (readl_poll_timeout_atomic(&desc->des0, val, 608 !(val & IDMAC_DES0_OWN), 609 10, 100 * USEC_PER_MSEC)) 610 goto err_own_bit; 611 612 /* 613 * Set the OWN bit and disable interrupts 614 * for this descriptor 615 */ 616 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 617 IDMAC_DES0_CH; 618 619 /* Buffer length */ 620 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); 621 622 /* Physical address to DMA to/from */ 623 desc->des4 = mem_addr & 0xffffffff; 624 desc->des5 = mem_addr >> 32; 625 626 /* Update physical address for the next desc */ 627 mem_addr += desc_len; 628 629 /* Save pointer to the last descriptor */ 630 desc_last = desc; 631 } 632 } 633 634 /* Set first descriptor */ 635 desc_first->des0 |= IDMAC_DES0_FD; 636 637 /* Set last descriptor */ 638 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 639 desc_last->des0 |= IDMAC_DES0_LD; 640 641 return 0; 642 err_own_bit: 643 /* restore the descriptor chain as it's polluted */ 644 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 645 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 646 dw_mci_idmac_init(host); 647 return -EINVAL; 648 } 649 650 651 static inline int dw_mci_prepare_desc32(struct dw_mci *host, 652 struct mmc_data *data, 653 unsigned int sg_len) 654 { 655 unsigned int desc_len; 656 struct idmac_desc *desc_first, *desc_last, *desc; 657 u32 val; 658 int i; 659 660 desc_first = desc_last = desc = host->sg_cpu; 661 662 for (i = 0; i < sg_len; i++) { 663 unsigned int length = sg_dma_len(&data->sg[i]); 664 665 u32 mem_addr = sg_dma_address(&data->sg[i]); 666 667 for ( ; length ; desc++) { 668 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 669 length : DW_MCI_DESC_DATA_LENGTH; 670 671 length -= desc_len; 672 673 /* 674 * Wait for the former clear OWN bit operation 675 * of IDMAC to make sure that this descriptor 676 * isn't still owned by IDMAC as IDMAC's write 677 * ops and CPU's read ops are asynchronous. 678 */ 679 if (readl_poll_timeout_atomic(&desc->des0, val, 680 IDMAC_OWN_CLR64(val), 681 10, 682 100 * USEC_PER_MSEC)) 683 goto err_own_bit; 684 685 /* 686 * Set the OWN bit and disable interrupts 687 * for this descriptor 688 */ 689 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 690 IDMAC_DES0_DIC | 691 IDMAC_DES0_CH); 692 693 /* Buffer length */ 694 IDMAC_SET_BUFFER1_SIZE(desc, desc_len); 695 696 /* Physical address to DMA to/from */ 697 desc->des2 = cpu_to_le32(mem_addr); 698 699 /* Update physical address for the next desc */ 700 mem_addr += desc_len; 701 702 /* Save pointer to the last descriptor */ 703 desc_last = desc; 704 } 705 } 706 707 /* Set first descriptor */ 708 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); 709 710 /* Set last descriptor */ 711 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | 712 IDMAC_DES0_DIC)); 713 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); 714 715 return 0; 716 err_own_bit: 717 /* restore the descriptor chain as it's polluted */ 718 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 719 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 720 dw_mci_idmac_init(host); 721 return -EINVAL; 722 } 723 724 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 725 { 726 u32 temp; 727 int ret; 728 729 if (host->dma_64bit_address == 1) 730 ret = dw_mci_prepare_desc64(host, host->data, sg_len); 731 else 732 ret = dw_mci_prepare_desc32(host, host->data, sg_len); 733 734 if (ret) 735 goto out; 736 737 /* drain writebuffer */ 738 wmb(); 739 740 /* Make sure to reset DMA in case we did PIO before this */ 741 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); 742 dw_mci_idmac_reset(host); 743 744 /* Select IDMAC interface */ 745 temp = mci_readl(host, CTRL); 746 temp |= SDMMC_CTRL_USE_IDMAC; 747 mci_writel(host, CTRL, temp); 748 749 /* drain writebuffer */ 750 wmb(); 751 752 /* Enable the IDMAC */ 753 temp = mci_readl(host, BMOD); 754 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 755 mci_writel(host, BMOD, temp); 756 757 /* Start it running */ 758 mci_writel(host, PLDMND, 1); 759 760 out: 761 return ret; 762 } 763 764 static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 765 .init = dw_mci_idmac_init, 766 .start = dw_mci_idmac_start_dma, 767 .stop = dw_mci_idmac_stop_dma, 768 .complete = dw_mci_dmac_complete_dma, 769 .cleanup = dw_mci_dma_cleanup, 770 }; 771 772 static void dw_mci_edmac_stop_dma(struct dw_mci *host) 773 { 774 dmaengine_terminate_async(host->dms->ch); 775 } 776 777 static int dw_mci_edmac_start_dma(struct dw_mci *host, 778 unsigned int sg_len) 779 { 780 struct dma_slave_config cfg; 781 struct dma_async_tx_descriptor *desc = NULL; 782 struct scatterlist *sgl = host->data->sg; 783 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 784 u32 sg_elems = host->data->sg_len; 785 u32 fifoth_val; 786 u32 fifo_offset = host->fifo_reg - host->regs; 787 int ret = 0; 788 789 /* Set external dma config: burst size, burst width */ 790 memset(&cfg, 0, sizeof(cfg)); 791 cfg.dst_addr = host->phy_regs + fifo_offset; 792 cfg.src_addr = cfg.dst_addr; 793 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 794 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 795 796 /* Match burst msize with external dma config */ 797 fifoth_val = mci_readl(host, FIFOTH); 798 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; 799 cfg.src_maxburst = cfg.dst_maxburst; 800 801 if (host->data->flags & MMC_DATA_WRITE) 802 cfg.direction = DMA_MEM_TO_DEV; 803 else 804 cfg.direction = DMA_DEV_TO_MEM; 805 806 ret = dmaengine_slave_config(host->dms->ch, &cfg); 807 if (ret) { 808 dev_err(host->dev, "Failed to config edmac.\n"); 809 return -EBUSY; 810 } 811 812 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, 813 sg_len, cfg.direction, 814 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 815 if (!desc) { 816 dev_err(host->dev, "Can't prepare slave sg.\n"); 817 return -EBUSY; 818 } 819 820 /* Set dw_mci_dmac_complete_dma as callback */ 821 desc->callback = dw_mci_dmac_complete_dma; 822 desc->callback_param = (void *)host; 823 dmaengine_submit(desc); 824 825 /* Flush cache before write */ 826 if (host->data->flags & MMC_DATA_WRITE) 827 dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl, 828 sg_elems, DMA_TO_DEVICE); 829 830 dma_async_issue_pending(host->dms->ch); 831 832 return 0; 833 } 834 835 static int dw_mci_edmac_init(struct dw_mci *host) 836 { 837 /* Request external dma channel */ 838 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); 839 if (!host->dms) 840 return -ENOMEM; 841 842 host->dms->ch = dma_request_chan(host->dev, "rx-tx"); 843 if (IS_ERR(host->dms->ch)) { 844 int ret = PTR_ERR(host->dms->ch); 845 846 dev_err(host->dev, "Failed to get external DMA channel.\n"); 847 kfree(host->dms); 848 host->dms = NULL; 849 return ret; 850 } 851 852 return 0; 853 } 854 855 static void dw_mci_edmac_exit(struct dw_mci *host) 856 { 857 if (host->dms) { 858 if (host->dms->ch) { 859 dma_release_channel(host->dms->ch); 860 host->dms->ch = NULL; 861 } 862 kfree(host->dms); 863 host->dms = NULL; 864 } 865 } 866 867 static const struct dw_mci_dma_ops dw_mci_edmac_ops = { 868 .init = dw_mci_edmac_init, 869 .exit = dw_mci_edmac_exit, 870 .start = dw_mci_edmac_start_dma, 871 .stop = dw_mci_edmac_stop_dma, 872 .complete = dw_mci_dmac_complete_dma, 873 .cleanup = dw_mci_dma_cleanup, 874 }; 875 876 static int dw_mci_pre_dma_transfer(struct dw_mci *host, 877 struct mmc_data *data, 878 int cookie) 879 { 880 struct scatterlist *sg; 881 unsigned int i, sg_len; 882 883 if (data->host_cookie == COOKIE_PRE_MAPPED) 884 return data->sg_len; 885 886 /* 887 * We don't do DMA on "complex" transfers, i.e. with 888 * non-word-aligned buffers or lengths. Also, we don't bother 889 * with all the DMA setup overhead for short transfers. 890 */ 891 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 892 return -EINVAL; 893 894 if (data->blksz & 3) 895 return -EINVAL; 896 897 for_each_sg(data->sg, sg, data->sg_len, i) { 898 if (sg->offset & 3 || sg->length & 3) 899 return -EINVAL; 900 } 901 902 sg_len = dma_map_sg(host->dev, 903 data->sg, 904 data->sg_len, 905 mmc_get_dma_dir(data)); 906 if (sg_len == 0) 907 return -EINVAL; 908 909 data->host_cookie = cookie; 910 911 return sg_len; 912 } 913 914 static void dw_mci_pre_req(struct mmc_host *mmc, 915 struct mmc_request *mrq) 916 { 917 struct dw_mci_slot *slot = mmc_priv(mmc); 918 struct mmc_data *data = mrq->data; 919 920 if (!slot->host->use_dma || !data) 921 return; 922 923 /* This data might be unmapped at this time */ 924 data->host_cookie = COOKIE_UNMAPPED; 925 926 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 927 COOKIE_PRE_MAPPED) < 0) 928 data->host_cookie = COOKIE_UNMAPPED; 929 } 930 931 static void dw_mci_post_req(struct mmc_host *mmc, 932 struct mmc_request *mrq, 933 int err) 934 { 935 struct dw_mci_slot *slot = mmc_priv(mmc); 936 struct mmc_data *data = mrq->data; 937 938 if (!slot->host->use_dma || !data) 939 return; 940 941 if (data->host_cookie != COOKIE_UNMAPPED) 942 dma_unmap_sg(slot->host->dev, 943 data->sg, 944 data->sg_len, 945 mmc_get_dma_dir(data)); 946 data->host_cookie = COOKIE_UNMAPPED; 947 } 948 949 static int dw_mci_get_cd(struct mmc_host *mmc) 950 { 951 int present; 952 struct dw_mci_slot *slot = mmc_priv(mmc); 953 struct dw_mci *host = slot->host; 954 int gpio_cd = mmc_gpio_get_cd(mmc); 955 956 /* Use platform get_cd function, else try onboard card detect */ 957 if (((mmc->caps & MMC_CAP_NEEDS_POLL) 958 || !mmc_card_is_removable(mmc))) { 959 present = 1; 960 961 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 962 if (mmc->caps & MMC_CAP_NEEDS_POLL) { 963 dev_info(&mmc->class_dev, 964 "card is polling.\n"); 965 } else { 966 dev_info(&mmc->class_dev, 967 "card is non-removable.\n"); 968 } 969 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 970 } 971 972 return present; 973 } else if (gpio_cd >= 0) 974 present = gpio_cd; 975 else 976 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 977 == 0 ? 1 : 0; 978 979 spin_lock_bh(&host->lock); 980 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 981 dev_dbg(&mmc->class_dev, "card is present\n"); 982 else if (!present && 983 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 984 dev_dbg(&mmc->class_dev, "card is not present\n"); 985 spin_unlock_bh(&host->lock); 986 987 return present; 988 } 989 990 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 991 { 992 unsigned int blksz = data->blksz; 993 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 994 u32 fifo_width = 1 << host->data_shift; 995 u32 blksz_depth = blksz / fifo_width, fifoth_val; 996 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 997 int idx = ARRAY_SIZE(mszs) - 1; 998 999 /* pio should ship this scenario */ 1000 if (!host->use_dma) 1001 return; 1002 1003 tx_wmark = (host->fifo_depth) / 2; 1004 tx_wmark_invers = host->fifo_depth - tx_wmark; 1005 1006 /* 1007 * MSIZE is '1', 1008 * if blksz is not a multiple of the FIFO width 1009 */ 1010 if (blksz % fifo_width) 1011 goto done; 1012 1013 do { 1014 if (!((blksz_depth % mszs[idx]) || 1015 (tx_wmark_invers % mszs[idx]))) { 1016 msize = idx; 1017 rx_wmark = mszs[idx] - 1; 1018 break; 1019 } 1020 } while (--idx > 0); 1021 /* 1022 * If idx is '0', it won't be tried 1023 * Thus, initial values are uesed 1024 */ 1025 done: 1026 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 1027 mci_writel(host, FIFOTH, fifoth_val); 1028 } 1029 1030 static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) 1031 { 1032 unsigned int blksz = data->blksz; 1033 u32 blksz_depth, fifo_depth; 1034 u16 thld_size; 1035 u8 enable; 1036 1037 /* 1038 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is 1039 * in the FIFO region, so we really shouldn't access it). 1040 */ 1041 if (host->verid < DW_MMC_240A || 1042 (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) 1043 return; 1044 1045 /* 1046 * Card write Threshold is introduced since 2.80a 1047 * It's used when HS400 mode is enabled. 1048 */ 1049 if (data->flags & MMC_DATA_WRITE && 1050 host->timing != MMC_TIMING_MMC_HS400) 1051 goto disable; 1052 1053 if (data->flags & MMC_DATA_WRITE) 1054 enable = SDMMC_CARD_WR_THR_EN; 1055 else 1056 enable = SDMMC_CARD_RD_THR_EN; 1057 1058 if (host->timing != MMC_TIMING_MMC_HS200 && 1059 host->timing != MMC_TIMING_UHS_SDR104 && 1060 host->timing != MMC_TIMING_MMC_HS400) 1061 goto disable; 1062 1063 blksz_depth = blksz / (1 << host->data_shift); 1064 fifo_depth = host->fifo_depth; 1065 1066 if (blksz_depth > fifo_depth) 1067 goto disable; 1068 1069 /* 1070 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' 1071 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz 1072 * Currently just choose blksz. 1073 */ 1074 thld_size = blksz; 1075 mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); 1076 return; 1077 1078 disable: 1079 mci_writel(host, CDTHRCTL, 0); 1080 } 1081 1082 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 1083 { 1084 unsigned long irqflags; 1085 int sg_len; 1086 u32 temp; 1087 1088 host->using_dma = 0; 1089 1090 /* If we don't have a channel, we can't do DMA */ 1091 if (!host->use_dma) 1092 return -ENODEV; 1093 1094 sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1095 if (sg_len < 0) { 1096 host->dma_ops->stop(host); 1097 return sg_len; 1098 } 1099 1100 host->using_dma = 1; 1101 1102 if (host->use_dma == TRANS_MODE_IDMAC) 1103 dev_vdbg(host->dev, 1104 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 1105 (unsigned long)host->sg_cpu, 1106 (unsigned long)host->sg_dma, 1107 sg_len); 1108 1109 /* 1110 * Decide the MSIZE and RX/TX Watermark. 1111 * If current block size is same with previous size, 1112 * no need to update fifoth. 1113 */ 1114 if (host->prev_blksz != data->blksz) 1115 dw_mci_adjust_fifoth(host, data); 1116 1117 /* Enable the DMA interface */ 1118 temp = mci_readl(host, CTRL); 1119 temp |= SDMMC_CTRL_DMA_ENABLE; 1120 mci_writel(host, CTRL, temp); 1121 1122 /* Disable RX/TX IRQs, let DMA handle it */ 1123 spin_lock_irqsave(&host->irq_lock, irqflags); 1124 temp = mci_readl(host, INTMASK); 1125 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 1126 mci_writel(host, INTMASK, temp); 1127 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1128 1129 if (host->dma_ops->start(host, sg_len)) { 1130 host->dma_ops->stop(host); 1131 /* We can't do DMA, try PIO for this one */ 1132 dev_dbg(host->dev, 1133 "%s: fall back to PIO mode for current transfer\n", 1134 __func__); 1135 return -ENODEV; 1136 } 1137 1138 return 0; 1139 } 1140 1141 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 1142 { 1143 unsigned long irqflags; 1144 int flags = SG_MITER_ATOMIC; 1145 u32 temp; 1146 1147 data->error = -EINPROGRESS; 1148 1149 WARN_ON(host->data); 1150 host->sg = NULL; 1151 host->data = data; 1152 1153 if (data->flags & MMC_DATA_READ) 1154 host->dir_status = DW_MCI_RECV_STATUS; 1155 else 1156 host->dir_status = DW_MCI_SEND_STATUS; 1157 1158 dw_mci_ctrl_thld(host, data); 1159 1160 if (dw_mci_submit_data_dma(host, data)) { 1161 if (host->data->flags & MMC_DATA_READ) 1162 flags |= SG_MITER_TO_SG; 1163 else 1164 flags |= SG_MITER_FROM_SG; 1165 1166 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1167 host->sg = data->sg; 1168 host->part_buf_start = 0; 1169 host->part_buf_count = 0; 1170 1171 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 1172 1173 spin_lock_irqsave(&host->irq_lock, irqflags); 1174 temp = mci_readl(host, INTMASK); 1175 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 1176 mci_writel(host, INTMASK, temp); 1177 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1178 1179 temp = mci_readl(host, CTRL); 1180 temp &= ~SDMMC_CTRL_DMA_ENABLE; 1181 mci_writel(host, CTRL, temp); 1182 1183 /* 1184 * Use the initial fifoth_val for PIO mode. If wm_algined 1185 * is set, we set watermark same as data size. 1186 * If next issued data may be transfered by DMA mode, 1187 * prev_blksz should be invalidated. 1188 */ 1189 if (host->wm_aligned) 1190 dw_mci_adjust_fifoth(host, data); 1191 else 1192 mci_writel(host, FIFOTH, host->fifoth_val); 1193 host->prev_blksz = 0; 1194 } else { 1195 /* 1196 * Keep the current block size. 1197 * It will be used to decide whether to update 1198 * fifoth register next time. 1199 */ 1200 host->prev_blksz = data->blksz; 1201 } 1202 } 1203 1204 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) 1205 { 1206 struct dw_mci *host = slot->host; 1207 unsigned int clock = slot->clock; 1208 u32 div; 1209 u32 clk_en_a; 1210 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT; 1211 1212 /* We must continue to set bit 28 in CMD until the change is complete */ 1213 if (host->state == STATE_WAITING_CMD11_DONE) 1214 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; 1215 1216 slot->mmc->actual_clock = 0; 1217 1218 if (!clock) { 1219 mci_writel(host, CLKENA, 0); 1220 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1221 } else if (clock != host->current_speed || force_clkinit) { 1222 div = host->bus_hz / clock; 1223 if (host->bus_hz % clock && host->bus_hz > clock) 1224 /* 1225 * move the + 1 after the divide to prevent 1226 * over-clocking the card. 1227 */ 1228 div += 1; 1229 1230 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1231 1232 if ((clock != slot->__clk_old && 1233 !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || 1234 force_clkinit) { 1235 /* Silent the verbose log if calling from PM context */ 1236 if (!force_clkinit) 1237 dev_info(&slot->mmc->class_dev, 1238 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1239 slot->id, host->bus_hz, clock, 1240 div ? ((host->bus_hz / div) >> 1) : 1241 host->bus_hz, div); 1242 1243 /* 1244 * If card is polling, display the message only 1245 * one time at boot time. 1246 */ 1247 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && 1248 slot->mmc->f_min == clock) 1249 set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags); 1250 } 1251 1252 /* disable clock */ 1253 mci_writel(host, CLKENA, 0); 1254 mci_writel(host, CLKSRC, 0); 1255 1256 /* inform CIU */ 1257 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1258 1259 /* set clock to desired speed */ 1260 mci_writel(host, CLKDIV, div); 1261 1262 /* inform CIU */ 1263 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1264 1265 /* enable clock; only low power if no SDIO */ 1266 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 1267 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) 1268 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 1269 mci_writel(host, CLKENA, clk_en_a); 1270 1271 /* inform CIU */ 1272 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1273 1274 /* keep the last clock value that was requested from core */ 1275 slot->__clk_old = clock; 1276 slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) : 1277 host->bus_hz; 1278 } 1279 1280 host->current_speed = clock; 1281 1282 /* Set the current slot bus width */ 1283 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 1284 } 1285 1286 static void __dw_mci_start_request(struct dw_mci *host, 1287 struct dw_mci_slot *slot, 1288 struct mmc_command *cmd) 1289 { 1290 struct mmc_request *mrq; 1291 struct mmc_data *data; 1292 u32 cmdflags; 1293 1294 mrq = slot->mrq; 1295 1296 host->mrq = mrq; 1297 1298 host->pending_events = 0; 1299 host->completed_events = 0; 1300 host->cmd_status = 0; 1301 host->data_status = 0; 1302 host->dir_status = 0; 1303 1304 data = cmd->data; 1305 if (data) { 1306 mci_writel(host, TMOUT, 0xFFFFFFFF); 1307 mci_writel(host, BYTCNT, data->blksz*data->blocks); 1308 mci_writel(host, BLKSIZ, data->blksz); 1309 } 1310 1311 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 1312 1313 /* this is the first command, send the initialization clock */ 1314 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 1315 cmdflags |= SDMMC_CMD_INIT; 1316 1317 if (data) { 1318 dw_mci_submit_data(host, data); 1319 wmb(); /* drain writebuffer */ 1320 } 1321 1322 dw_mci_start_command(host, cmd, cmdflags); 1323 1324 if (cmd->opcode == SD_SWITCH_VOLTAGE) { 1325 unsigned long irqflags; 1326 1327 /* 1328 * Databook says to fail after 2ms w/ no response, but evidence 1329 * shows that sometimes the cmd11 interrupt takes over 130ms. 1330 * We'll set to 500ms, plus an extra jiffy just in case jiffies 1331 * is just about to roll over. 1332 * 1333 * We do this whole thing under spinlock and only if the 1334 * command hasn't already completed (indicating the the irq 1335 * already ran so we don't want the timeout). 1336 */ 1337 spin_lock_irqsave(&host->irq_lock, irqflags); 1338 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 1339 mod_timer(&host->cmd11_timer, 1340 jiffies + msecs_to_jiffies(500) + 1); 1341 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1342 } 1343 1344 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); 1345 } 1346 1347 static void dw_mci_start_request(struct dw_mci *host, 1348 struct dw_mci_slot *slot) 1349 { 1350 struct mmc_request *mrq = slot->mrq; 1351 struct mmc_command *cmd; 1352 1353 cmd = mrq->sbc ? mrq->sbc : mrq->cmd; 1354 __dw_mci_start_request(host, slot, cmd); 1355 } 1356 1357 /* must be called with host->lock held */ 1358 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 1359 struct mmc_request *mrq) 1360 { 1361 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 1362 host->state); 1363 1364 slot->mrq = mrq; 1365 1366 if (host->state == STATE_WAITING_CMD11_DONE) { 1367 dev_warn(&slot->mmc->class_dev, 1368 "Voltage change didn't complete\n"); 1369 /* 1370 * this case isn't expected to happen, so we can 1371 * either crash here or just try to continue on 1372 * in the closest possible state 1373 */ 1374 host->state = STATE_IDLE; 1375 } 1376 1377 if (host->state == STATE_IDLE) { 1378 host->state = STATE_SENDING_CMD; 1379 dw_mci_start_request(host, slot); 1380 } else { 1381 list_add_tail(&slot->queue_node, &host->queue); 1382 } 1383 } 1384 1385 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1386 { 1387 struct dw_mci_slot *slot = mmc_priv(mmc); 1388 struct dw_mci *host = slot->host; 1389 1390 WARN_ON(slot->mrq); 1391 1392 /* 1393 * The check for card presence and queueing of the request must be 1394 * atomic, otherwise the card could be removed in between and the 1395 * request wouldn't fail until another card was inserted. 1396 */ 1397 1398 if (!dw_mci_get_cd(mmc)) { 1399 mrq->cmd->error = -ENOMEDIUM; 1400 mmc_request_done(mmc, mrq); 1401 return; 1402 } 1403 1404 spin_lock_bh(&host->lock); 1405 1406 dw_mci_queue_request(host, slot, mrq); 1407 1408 spin_unlock_bh(&host->lock); 1409 } 1410 1411 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1412 { 1413 struct dw_mci_slot *slot = mmc_priv(mmc); 1414 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 1415 u32 regs; 1416 int ret; 1417 1418 switch (ios->bus_width) { 1419 case MMC_BUS_WIDTH_4: 1420 slot->ctype = SDMMC_CTYPE_4BIT; 1421 break; 1422 case MMC_BUS_WIDTH_8: 1423 slot->ctype = SDMMC_CTYPE_8BIT; 1424 break; 1425 default: 1426 /* set default 1 bit mode */ 1427 slot->ctype = SDMMC_CTYPE_1BIT; 1428 } 1429 1430 regs = mci_readl(slot->host, UHS_REG); 1431 1432 /* DDR mode set */ 1433 if (ios->timing == MMC_TIMING_MMC_DDR52 || 1434 ios->timing == MMC_TIMING_UHS_DDR50 || 1435 ios->timing == MMC_TIMING_MMC_HS400) 1436 regs |= ((0x1 << slot->id) << 16); 1437 else 1438 regs &= ~((0x1 << slot->id) << 16); 1439 1440 mci_writel(slot->host, UHS_REG, regs); 1441 slot->host->timing = ios->timing; 1442 1443 /* 1444 * Use mirror of ios->clock to prevent race with mmc 1445 * core ios update when finding the minimum. 1446 */ 1447 slot->clock = ios->clock; 1448 1449 if (drv_data && drv_data->set_ios) 1450 drv_data->set_ios(slot->host, ios); 1451 1452 switch (ios->power_mode) { 1453 case MMC_POWER_UP: 1454 if (!IS_ERR(mmc->supply.vmmc)) { 1455 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 1456 ios->vdd); 1457 if (ret) { 1458 dev_err(slot->host->dev, 1459 "failed to enable vmmc regulator\n"); 1460 /*return, if failed turn on vmmc*/ 1461 return; 1462 } 1463 } 1464 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 1465 regs = mci_readl(slot->host, PWREN); 1466 regs |= (1 << slot->id); 1467 mci_writel(slot->host, PWREN, regs); 1468 break; 1469 case MMC_POWER_ON: 1470 if (!slot->host->vqmmc_enabled) { 1471 if (!IS_ERR(mmc->supply.vqmmc)) { 1472 ret = regulator_enable(mmc->supply.vqmmc); 1473 if (ret < 0) 1474 dev_err(slot->host->dev, 1475 "failed to enable vqmmc\n"); 1476 else 1477 slot->host->vqmmc_enabled = true; 1478 1479 } else { 1480 /* Keep track so we don't reset again */ 1481 slot->host->vqmmc_enabled = true; 1482 } 1483 1484 /* Reset our state machine after powering on */ 1485 dw_mci_ctrl_reset(slot->host, 1486 SDMMC_CTRL_ALL_RESET_FLAGS); 1487 } 1488 1489 /* Adjust clock / bus width after power is up */ 1490 dw_mci_setup_bus(slot, false); 1491 1492 break; 1493 case MMC_POWER_OFF: 1494 /* Turn clock off before power goes down */ 1495 dw_mci_setup_bus(slot, false); 1496 1497 if (!IS_ERR(mmc->supply.vmmc)) 1498 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1499 1500 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) 1501 regulator_disable(mmc->supply.vqmmc); 1502 slot->host->vqmmc_enabled = false; 1503 1504 regs = mci_readl(slot->host, PWREN); 1505 regs &= ~(1 << slot->id); 1506 mci_writel(slot->host, PWREN, regs); 1507 break; 1508 default: 1509 break; 1510 } 1511 1512 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) 1513 slot->host->state = STATE_IDLE; 1514 } 1515 1516 static int dw_mci_card_busy(struct mmc_host *mmc) 1517 { 1518 struct dw_mci_slot *slot = mmc_priv(mmc); 1519 u32 status; 1520 1521 /* 1522 * Check the busy bit which is low when DAT[3:0] 1523 * (the data lines) are 0000 1524 */ 1525 status = mci_readl(slot->host, STATUS); 1526 1527 return !!(status & SDMMC_STATUS_BUSY); 1528 } 1529 1530 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) 1531 { 1532 struct dw_mci_slot *slot = mmc_priv(mmc); 1533 struct dw_mci *host = slot->host; 1534 const struct dw_mci_drv_data *drv_data = host->drv_data; 1535 u32 uhs; 1536 u32 v18 = SDMMC_UHS_18V << slot->id; 1537 int ret; 1538 1539 if (drv_data && drv_data->switch_voltage) 1540 return drv_data->switch_voltage(mmc, ios); 1541 1542 /* 1543 * Program the voltage. Note that some instances of dw_mmc may use 1544 * the UHS_REG for this. For other instances (like exynos) the UHS_REG 1545 * does no harm but you need to set the regulator directly. Try both. 1546 */ 1547 uhs = mci_readl(host, UHS_REG); 1548 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1549 uhs &= ~v18; 1550 else 1551 uhs |= v18; 1552 1553 if (!IS_ERR(mmc->supply.vqmmc)) { 1554 ret = mmc_regulator_set_vqmmc(mmc, ios); 1555 if (ret < 0) { 1556 dev_dbg(&mmc->class_dev, 1557 "Regulator set error %d - %s V\n", 1558 ret, uhs & v18 ? "1.8" : "3.3"); 1559 return ret; 1560 } 1561 } 1562 mci_writel(host, UHS_REG, uhs); 1563 1564 return 0; 1565 } 1566 1567 static int dw_mci_get_ro(struct mmc_host *mmc) 1568 { 1569 int read_only; 1570 struct dw_mci_slot *slot = mmc_priv(mmc); 1571 int gpio_ro = mmc_gpio_get_ro(mmc); 1572 1573 /* Use platform get_ro function, else try on board write protect */ 1574 if (gpio_ro >= 0) 1575 read_only = gpio_ro; 1576 else 1577 read_only = 1578 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 1579 1580 dev_dbg(&mmc->class_dev, "card is %s\n", 1581 read_only ? "read-only" : "read-write"); 1582 1583 return read_only; 1584 } 1585 1586 static void dw_mci_hw_reset(struct mmc_host *mmc) 1587 { 1588 struct dw_mci_slot *slot = mmc_priv(mmc); 1589 struct dw_mci *host = slot->host; 1590 int reset; 1591 1592 if (host->use_dma == TRANS_MODE_IDMAC) 1593 dw_mci_idmac_reset(host); 1594 1595 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET | 1596 SDMMC_CTRL_FIFO_RESET)) 1597 return; 1598 1599 /* 1600 * According to eMMC spec, card reset procedure: 1601 * tRstW >= 1us: RST_n pulse width 1602 * tRSCA >= 200us: RST_n to Command time 1603 * tRSTH >= 1us: RST_n high period 1604 */ 1605 reset = mci_readl(host, RST_N); 1606 reset &= ~(SDMMC_RST_HWACTIVE << slot->id); 1607 mci_writel(host, RST_N, reset); 1608 usleep_range(1, 2); 1609 reset |= SDMMC_RST_HWACTIVE << slot->id; 1610 mci_writel(host, RST_N, reset); 1611 usleep_range(200, 300); 1612 } 1613 1614 static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare) 1615 { 1616 struct dw_mci *host = slot->host; 1617 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; 1618 u32 clk_en_a_old; 1619 u32 clk_en_a; 1620 1621 /* 1622 * Low power mode will stop the card clock when idle. According to the 1623 * description of the CLKENA register we should disable low power mode 1624 * for SDIO cards if we need SDIO interrupts to work. 1625 */ 1626 1627 clk_en_a_old = mci_readl(host, CLKENA); 1628 if (prepare) { 1629 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1630 clk_en_a = clk_en_a_old & ~clken_low_pwr; 1631 } else { 1632 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1633 clk_en_a = clk_en_a_old | clken_low_pwr; 1634 } 1635 1636 if (clk_en_a != clk_en_a_old) { 1637 mci_writel(host, CLKENA, clk_en_a); 1638 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 1639 0); 1640 } 1641 } 1642 1643 static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb) 1644 { 1645 struct dw_mci *host = slot->host; 1646 unsigned long irqflags; 1647 u32 int_mask; 1648 1649 spin_lock_irqsave(&host->irq_lock, irqflags); 1650 1651 /* Enable/disable Slot Specific SDIO interrupt */ 1652 int_mask = mci_readl(host, INTMASK); 1653 if (enb) 1654 int_mask |= SDMMC_INT_SDIO(slot->sdio_id); 1655 else 1656 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id); 1657 mci_writel(host, INTMASK, int_mask); 1658 1659 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1660 } 1661 1662 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 1663 { 1664 struct dw_mci_slot *slot = mmc_priv(mmc); 1665 struct dw_mci *host = slot->host; 1666 1667 dw_mci_prepare_sdio_irq(slot, enb); 1668 __dw_mci_enable_sdio_irq(slot, enb); 1669 1670 /* Avoid runtime suspending the device when SDIO IRQ is enabled */ 1671 if (enb) 1672 pm_runtime_get_noresume(host->dev); 1673 else 1674 pm_runtime_put_noidle(host->dev); 1675 } 1676 1677 static void dw_mci_ack_sdio_irq(struct mmc_host *mmc) 1678 { 1679 struct dw_mci_slot *slot = mmc_priv(mmc); 1680 1681 __dw_mci_enable_sdio_irq(slot, 1); 1682 } 1683 1684 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1685 { 1686 struct dw_mci_slot *slot = mmc_priv(mmc); 1687 struct dw_mci *host = slot->host; 1688 const struct dw_mci_drv_data *drv_data = host->drv_data; 1689 int err = -EINVAL; 1690 1691 if (drv_data && drv_data->execute_tuning) 1692 err = drv_data->execute_tuning(slot, opcode); 1693 return err; 1694 } 1695 1696 static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, 1697 struct mmc_ios *ios) 1698 { 1699 struct dw_mci_slot *slot = mmc_priv(mmc); 1700 struct dw_mci *host = slot->host; 1701 const struct dw_mci_drv_data *drv_data = host->drv_data; 1702 1703 if (drv_data && drv_data->prepare_hs400_tuning) 1704 return drv_data->prepare_hs400_tuning(host, ios); 1705 1706 return 0; 1707 } 1708 1709 static bool dw_mci_reset(struct dw_mci *host) 1710 { 1711 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; 1712 bool ret = false; 1713 u32 status = 0; 1714 1715 /* 1716 * Resetting generates a block interrupt, hence setting 1717 * the scatter-gather pointer to NULL. 1718 */ 1719 if (host->sg) { 1720 sg_miter_stop(&host->sg_miter); 1721 host->sg = NULL; 1722 } 1723 1724 if (host->use_dma) 1725 flags |= SDMMC_CTRL_DMA_RESET; 1726 1727 if (dw_mci_ctrl_reset(host, flags)) { 1728 /* 1729 * In all cases we clear the RAWINTS 1730 * register to clear any interrupts. 1731 */ 1732 mci_writel(host, RINTSTS, 0xFFFFFFFF); 1733 1734 if (!host->use_dma) { 1735 ret = true; 1736 goto ciu_out; 1737 } 1738 1739 /* Wait for dma_req to be cleared */ 1740 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 1741 status, 1742 !(status & SDMMC_STATUS_DMA_REQ), 1743 1, 500 * USEC_PER_MSEC)) { 1744 dev_err(host->dev, 1745 "%s: Timeout waiting for dma_req to be cleared\n", 1746 __func__); 1747 goto ciu_out; 1748 } 1749 1750 /* when using DMA next we reset the fifo again */ 1751 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) 1752 goto ciu_out; 1753 } else { 1754 /* if the controller reset bit did clear, then set clock regs */ 1755 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { 1756 dev_err(host->dev, 1757 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", 1758 __func__); 1759 goto ciu_out; 1760 } 1761 } 1762 1763 if (host->use_dma == TRANS_MODE_IDMAC) 1764 /* It is also required that we reinit idmac */ 1765 dw_mci_idmac_init(host); 1766 1767 ret = true; 1768 1769 ciu_out: 1770 /* After a CTRL reset we need to have CIU set clock registers */ 1771 mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0); 1772 1773 return ret; 1774 } 1775 1776 static const struct mmc_host_ops dw_mci_ops = { 1777 .request = dw_mci_request, 1778 .pre_req = dw_mci_pre_req, 1779 .post_req = dw_mci_post_req, 1780 .set_ios = dw_mci_set_ios, 1781 .get_ro = dw_mci_get_ro, 1782 .get_cd = dw_mci_get_cd, 1783 .hw_reset = dw_mci_hw_reset, 1784 .enable_sdio_irq = dw_mci_enable_sdio_irq, 1785 .ack_sdio_irq = dw_mci_ack_sdio_irq, 1786 .execute_tuning = dw_mci_execute_tuning, 1787 .card_busy = dw_mci_card_busy, 1788 .start_signal_voltage_switch = dw_mci_switch_voltage, 1789 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, 1790 }; 1791 1792 #ifdef CONFIG_FAULT_INJECTION 1793 static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t) 1794 { 1795 struct dw_mci *host = container_of(t, struct dw_mci, fault_timer); 1796 unsigned long flags; 1797 1798 spin_lock_irqsave(&host->irq_lock, flags); 1799 1800 /* 1801 * Only inject an error if we haven't already got an error or data over 1802 * interrupt. 1803 */ 1804 if (!host->data_status) { 1805 host->data_status = SDMMC_INT_DCRC; 1806 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1807 tasklet_schedule(&host->tasklet); 1808 } 1809 1810 spin_unlock_irqrestore(&host->irq_lock, flags); 1811 1812 return HRTIMER_NORESTART; 1813 } 1814 1815 static void dw_mci_start_fault_timer(struct dw_mci *host) 1816 { 1817 struct mmc_data *data = host->data; 1818 1819 if (!data || data->blocks <= 1) 1820 return; 1821 1822 if (!should_fail(&host->fail_data_crc, 1)) 1823 return; 1824 1825 /* 1826 * Try to inject the error at random points during the data transfer. 1827 */ 1828 hrtimer_start(&host->fault_timer, 1829 ms_to_ktime(prandom_u32() % 25), 1830 HRTIMER_MODE_REL); 1831 } 1832 1833 static void dw_mci_stop_fault_timer(struct dw_mci *host) 1834 { 1835 hrtimer_cancel(&host->fault_timer); 1836 } 1837 1838 static void dw_mci_init_fault(struct dw_mci *host) 1839 { 1840 host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER; 1841 1842 hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1843 host->fault_timer.function = dw_mci_fault_timer; 1844 } 1845 #else 1846 static void dw_mci_init_fault(struct dw_mci *host) 1847 { 1848 } 1849 1850 static void dw_mci_start_fault_timer(struct dw_mci *host) 1851 { 1852 } 1853 1854 static void dw_mci_stop_fault_timer(struct dw_mci *host) 1855 { 1856 } 1857 #endif 1858 1859 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 1860 __releases(&host->lock) 1861 __acquires(&host->lock) 1862 { 1863 struct dw_mci_slot *slot; 1864 struct mmc_host *prev_mmc = host->slot->mmc; 1865 1866 WARN_ON(host->cmd || host->data); 1867 1868 host->slot->mrq = NULL; 1869 host->mrq = NULL; 1870 if (!list_empty(&host->queue)) { 1871 slot = list_entry(host->queue.next, 1872 struct dw_mci_slot, queue_node); 1873 list_del(&slot->queue_node); 1874 dev_vdbg(host->dev, "list not empty: %s is next\n", 1875 mmc_hostname(slot->mmc)); 1876 host->state = STATE_SENDING_CMD; 1877 dw_mci_start_request(host, slot); 1878 } else { 1879 dev_vdbg(host->dev, "list empty\n"); 1880 1881 if (host->state == STATE_SENDING_CMD11) 1882 host->state = STATE_WAITING_CMD11_DONE; 1883 else 1884 host->state = STATE_IDLE; 1885 } 1886 1887 spin_unlock(&host->lock); 1888 mmc_request_done(prev_mmc, mrq); 1889 spin_lock(&host->lock); 1890 } 1891 1892 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 1893 { 1894 u32 status = host->cmd_status; 1895 1896 host->cmd_status = 0; 1897 1898 /* Read the response from the card (up to 16 bytes) */ 1899 if (cmd->flags & MMC_RSP_PRESENT) { 1900 if (cmd->flags & MMC_RSP_136) { 1901 cmd->resp[3] = mci_readl(host, RESP0); 1902 cmd->resp[2] = mci_readl(host, RESP1); 1903 cmd->resp[1] = mci_readl(host, RESP2); 1904 cmd->resp[0] = mci_readl(host, RESP3); 1905 } else { 1906 cmd->resp[0] = mci_readl(host, RESP0); 1907 cmd->resp[1] = 0; 1908 cmd->resp[2] = 0; 1909 cmd->resp[3] = 0; 1910 } 1911 } 1912 1913 if (status & SDMMC_INT_RTO) 1914 cmd->error = -ETIMEDOUT; 1915 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 1916 cmd->error = -EILSEQ; 1917 else if (status & SDMMC_INT_RESP_ERR) 1918 cmd->error = -EIO; 1919 else 1920 cmd->error = 0; 1921 1922 return cmd->error; 1923 } 1924 1925 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) 1926 { 1927 u32 status = host->data_status; 1928 1929 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1930 if (status & SDMMC_INT_DRTO) { 1931 data->error = -ETIMEDOUT; 1932 } else if (status & SDMMC_INT_DCRC) { 1933 data->error = -EILSEQ; 1934 } else if (status & SDMMC_INT_EBE) { 1935 if (host->dir_status == 1936 DW_MCI_SEND_STATUS) { 1937 /* 1938 * No data CRC status was returned. 1939 * The number of bytes transferred 1940 * will be exaggerated in PIO mode. 1941 */ 1942 data->bytes_xfered = 0; 1943 data->error = -ETIMEDOUT; 1944 } else if (host->dir_status == 1945 DW_MCI_RECV_STATUS) { 1946 data->error = -EILSEQ; 1947 } 1948 } else { 1949 /* SDMMC_INT_SBE is included */ 1950 data->error = -EILSEQ; 1951 } 1952 1953 dev_dbg(host->dev, "data error, status 0x%08x\n", status); 1954 1955 /* 1956 * After an error, there may be data lingering 1957 * in the FIFO 1958 */ 1959 dw_mci_reset(host); 1960 } else { 1961 data->bytes_xfered = data->blocks * data->blksz; 1962 data->error = 0; 1963 } 1964 1965 return data->error; 1966 } 1967 1968 static void dw_mci_set_drto(struct dw_mci *host) 1969 { 1970 unsigned int drto_clks; 1971 unsigned int drto_div; 1972 unsigned int drto_ms; 1973 unsigned long irqflags; 1974 1975 drto_clks = mci_readl(host, TMOUT) >> 8; 1976 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 1977 if (drto_div == 0) 1978 drto_div = 1; 1979 1980 drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, 1981 host->bus_hz); 1982 1983 /* add a bit spare time */ 1984 drto_ms += 10; 1985 1986 spin_lock_irqsave(&host->irq_lock, irqflags); 1987 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) 1988 mod_timer(&host->dto_timer, 1989 jiffies + msecs_to_jiffies(drto_ms)); 1990 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1991 } 1992 1993 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) 1994 { 1995 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 1996 return false; 1997 1998 /* 1999 * Really be certain that the timer has stopped. This is a bit of 2000 * paranoia and could only really happen if we had really bad 2001 * interrupt latency and the interrupt routine and timeout were 2002 * running concurrently so that the del_timer() in the interrupt 2003 * handler couldn't run. 2004 */ 2005 WARN_ON(del_timer_sync(&host->cto_timer)); 2006 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2007 2008 return true; 2009 } 2010 2011 static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) 2012 { 2013 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) 2014 return false; 2015 2016 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ 2017 WARN_ON(del_timer_sync(&host->dto_timer)); 2018 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2019 2020 return true; 2021 } 2022 2023 static void dw_mci_tasklet_func(struct tasklet_struct *t) 2024 { 2025 struct dw_mci *host = from_tasklet(host, t, tasklet); 2026 struct mmc_data *data; 2027 struct mmc_command *cmd; 2028 struct mmc_request *mrq; 2029 enum dw_mci_state state; 2030 enum dw_mci_state prev_state; 2031 unsigned int err; 2032 2033 spin_lock(&host->lock); 2034 2035 state = host->state; 2036 data = host->data; 2037 mrq = host->mrq; 2038 2039 do { 2040 prev_state = state; 2041 2042 switch (state) { 2043 case STATE_IDLE: 2044 case STATE_WAITING_CMD11_DONE: 2045 break; 2046 2047 case STATE_SENDING_CMD11: 2048 case STATE_SENDING_CMD: 2049 if (!dw_mci_clear_pending_cmd_complete(host)) 2050 break; 2051 2052 cmd = host->cmd; 2053 host->cmd = NULL; 2054 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 2055 err = dw_mci_command_complete(host, cmd); 2056 if (cmd == mrq->sbc && !err) { 2057 __dw_mci_start_request(host, host->slot, 2058 mrq->cmd); 2059 goto unlock; 2060 } 2061 2062 if (cmd->data && err) { 2063 /* 2064 * During UHS tuning sequence, sending the stop 2065 * command after the response CRC error would 2066 * throw the system into a confused state 2067 * causing all future tuning phases to report 2068 * failure. 2069 * 2070 * In such case controller will move into a data 2071 * transfer state after a response error or 2072 * response CRC error. Let's let that finish 2073 * before trying to send a stop, so we'll go to 2074 * STATE_SENDING_DATA. 2075 * 2076 * Although letting the data transfer take place 2077 * will waste a bit of time (we already know 2078 * the command was bad), it can't cause any 2079 * errors since it's possible it would have 2080 * taken place anyway if this tasklet got 2081 * delayed. Allowing the transfer to take place 2082 * avoids races and keeps things simple. 2083 */ 2084 if (err != -ETIMEDOUT && 2085 host->dir_status == DW_MCI_RECV_STATUS) { 2086 state = STATE_SENDING_DATA; 2087 continue; 2088 } 2089 2090 send_stop_abort(host, data); 2091 dw_mci_stop_dma(host); 2092 state = STATE_SENDING_STOP; 2093 break; 2094 } 2095 2096 if (!cmd->data || err) { 2097 dw_mci_request_end(host, mrq); 2098 goto unlock; 2099 } 2100 2101 prev_state = state = STATE_SENDING_DATA; 2102 fallthrough; 2103 2104 case STATE_SENDING_DATA: 2105 /* 2106 * We could get a data error and never a transfer 2107 * complete so we'd better check for it here. 2108 * 2109 * Note that we don't really care if we also got a 2110 * transfer complete; stopping the DMA and sending an 2111 * abort won't hurt. 2112 */ 2113 if (test_and_clear_bit(EVENT_DATA_ERROR, 2114 &host->pending_events)) { 2115 if (!(host->data_status & (SDMMC_INT_DRTO | 2116 SDMMC_INT_EBE))) 2117 send_stop_abort(host, data); 2118 dw_mci_stop_dma(host); 2119 state = STATE_DATA_ERROR; 2120 break; 2121 } 2122 2123 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2124 &host->pending_events)) { 2125 /* 2126 * If all data-related interrupts don't come 2127 * within the given time in reading data state. 2128 */ 2129 if (host->dir_status == DW_MCI_RECV_STATUS) 2130 dw_mci_set_drto(host); 2131 break; 2132 } 2133 2134 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 2135 2136 /* 2137 * Handle an EVENT_DATA_ERROR that might have shown up 2138 * before the transfer completed. This might not have 2139 * been caught by the check above because the interrupt 2140 * could have gone off between the previous check and 2141 * the check for transfer complete. 2142 * 2143 * Technically this ought not be needed assuming we 2144 * get a DATA_COMPLETE eventually (we'll notice the 2145 * error and end the request), but it shouldn't hurt. 2146 * 2147 * This has the advantage of sending the stop command. 2148 */ 2149 if (test_and_clear_bit(EVENT_DATA_ERROR, 2150 &host->pending_events)) { 2151 if (!(host->data_status & (SDMMC_INT_DRTO | 2152 SDMMC_INT_EBE))) 2153 send_stop_abort(host, data); 2154 dw_mci_stop_dma(host); 2155 state = STATE_DATA_ERROR; 2156 break; 2157 } 2158 prev_state = state = STATE_DATA_BUSY; 2159 2160 fallthrough; 2161 2162 case STATE_DATA_BUSY: 2163 if (!dw_mci_clear_pending_data_complete(host)) { 2164 /* 2165 * If data error interrupt comes but data over 2166 * interrupt doesn't come within the given time. 2167 * in reading data state. 2168 */ 2169 if (host->dir_status == DW_MCI_RECV_STATUS) 2170 dw_mci_set_drto(host); 2171 break; 2172 } 2173 2174 dw_mci_stop_fault_timer(host); 2175 host->data = NULL; 2176 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 2177 err = dw_mci_data_complete(host, data); 2178 2179 if (!err) { 2180 if (!data->stop || mrq->sbc) { 2181 if (mrq->sbc && data->stop) 2182 data->stop->error = 0; 2183 dw_mci_request_end(host, mrq); 2184 goto unlock; 2185 } 2186 2187 /* stop command for open-ended transfer*/ 2188 if (data->stop) 2189 send_stop_abort(host, data); 2190 } else { 2191 /* 2192 * If we don't have a command complete now we'll 2193 * never get one since we just reset everything; 2194 * better end the request. 2195 * 2196 * If we do have a command complete we'll fall 2197 * through to the SENDING_STOP command and 2198 * everything will be peachy keen. 2199 */ 2200 if (!test_bit(EVENT_CMD_COMPLETE, 2201 &host->pending_events)) { 2202 host->cmd = NULL; 2203 dw_mci_request_end(host, mrq); 2204 goto unlock; 2205 } 2206 } 2207 2208 /* 2209 * If err has non-zero, 2210 * stop-abort command has been already issued. 2211 */ 2212 prev_state = state = STATE_SENDING_STOP; 2213 2214 fallthrough; 2215 2216 case STATE_SENDING_STOP: 2217 if (!dw_mci_clear_pending_cmd_complete(host)) 2218 break; 2219 2220 /* CMD error in data command */ 2221 if (mrq->cmd->error && mrq->data) 2222 dw_mci_reset(host); 2223 2224 dw_mci_stop_fault_timer(host); 2225 host->cmd = NULL; 2226 host->data = NULL; 2227 2228 if (!mrq->sbc && mrq->stop) 2229 dw_mci_command_complete(host, mrq->stop); 2230 else 2231 host->cmd_status = 0; 2232 2233 dw_mci_request_end(host, mrq); 2234 goto unlock; 2235 2236 case STATE_DATA_ERROR: 2237 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2238 &host->pending_events)) 2239 break; 2240 2241 state = STATE_DATA_BUSY; 2242 break; 2243 } 2244 } while (state != prev_state); 2245 2246 host->state = state; 2247 unlock: 2248 spin_unlock(&host->lock); 2249 2250 } 2251 2252 /* push final bytes to part_buf, only use during push */ 2253 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 2254 { 2255 memcpy((void *)&host->part_buf, buf, cnt); 2256 host->part_buf_count = cnt; 2257 } 2258 2259 /* append bytes to part_buf, only use during push */ 2260 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 2261 { 2262 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 2263 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 2264 host->part_buf_count += cnt; 2265 return cnt; 2266 } 2267 2268 /* pull first bytes from part_buf, only use during pull */ 2269 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 2270 { 2271 cnt = min_t(int, cnt, host->part_buf_count); 2272 if (cnt) { 2273 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 2274 cnt); 2275 host->part_buf_count -= cnt; 2276 host->part_buf_start += cnt; 2277 } 2278 return cnt; 2279 } 2280 2281 /* pull final bytes from the part_buf, assuming it's just been filled */ 2282 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 2283 { 2284 memcpy(buf, &host->part_buf, cnt); 2285 host->part_buf_start = cnt; 2286 host->part_buf_count = (1 << host->data_shift) - cnt; 2287 } 2288 2289 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 2290 { 2291 struct mmc_data *data = host->data; 2292 int init_cnt = cnt; 2293 2294 /* try and push anything in the part_buf */ 2295 if (unlikely(host->part_buf_count)) { 2296 int len = dw_mci_push_part_bytes(host, buf, cnt); 2297 2298 buf += len; 2299 cnt -= len; 2300 if (host->part_buf_count == 2) { 2301 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2302 host->part_buf_count = 0; 2303 } 2304 } 2305 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2306 if (unlikely((unsigned long)buf & 0x1)) { 2307 while (cnt >= 2) { 2308 u16 aligned_buf[64]; 2309 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2310 int items = len >> 1; 2311 int i; 2312 /* memcpy from input buffer into aligned buffer */ 2313 memcpy(aligned_buf, buf, len); 2314 buf += len; 2315 cnt -= len; 2316 /* push data from aligned buffer into fifo */ 2317 for (i = 0; i < items; ++i) 2318 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 2319 } 2320 } else 2321 #endif 2322 { 2323 u16 *pdata = buf; 2324 2325 for (; cnt >= 2; cnt -= 2) 2326 mci_fifo_writew(host->fifo_reg, *pdata++); 2327 buf = pdata; 2328 } 2329 /* put anything remaining in the part_buf */ 2330 if (cnt) { 2331 dw_mci_set_part_bytes(host, buf, cnt); 2332 /* Push data if we have reached the expected data length */ 2333 if ((data->bytes_xfered + init_cnt) == 2334 (data->blksz * data->blocks)) 2335 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2336 } 2337 } 2338 2339 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2340 { 2341 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2342 if (unlikely((unsigned long)buf & 0x1)) { 2343 while (cnt >= 2) { 2344 /* pull data from fifo into aligned buffer */ 2345 u16 aligned_buf[64]; 2346 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2347 int items = len >> 1; 2348 int i; 2349 2350 for (i = 0; i < items; ++i) 2351 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 2352 /* memcpy from aligned buffer into output buffer */ 2353 memcpy(buf, aligned_buf, len); 2354 buf += len; 2355 cnt -= len; 2356 } 2357 } else 2358 #endif 2359 { 2360 u16 *pdata = buf; 2361 2362 for (; cnt >= 2; cnt -= 2) 2363 *pdata++ = mci_fifo_readw(host->fifo_reg); 2364 buf = pdata; 2365 } 2366 if (cnt) { 2367 host->part_buf16 = mci_fifo_readw(host->fifo_reg); 2368 dw_mci_pull_final_bytes(host, buf, cnt); 2369 } 2370 } 2371 2372 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2373 { 2374 struct mmc_data *data = host->data; 2375 int init_cnt = cnt; 2376 2377 /* try and push anything in the part_buf */ 2378 if (unlikely(host->part_buf_count)) { 2379 int len = dw_mci_push_part_bytes(host, buf, cnt); 2380 2381 buf += len; 2382 cnt -= len; 2383 if (host->part_buf_count == 4) { 2384 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2385 host->part_buf_count = 0; 2386 } 2387 } 2388 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2389 if (unlikely((unsigned long)buf & 0x3)) { 2390 while (cnt >= 4) { 2391 u32 aligned_buf[32]; 2392 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2393 int items = len >> 2; 2394 int i; 2395 /* memcpy from input buffer into aligned buffer */ 2396 memcpy(aligned_buf, buf, len); 2397 buf += len; 2398 cnt -= len; 2399 /* push data from aligned buffer into fifo */ 2400 for (i = 0; i < items; ++i) 2401 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 2402 } 2403 } else 2404 #endif 2405 { 2406 u32 *pdata = buf; 2407 2408 for (; cnt >= 4; cnt -= 4) 2409 mci_fifo_writel(host->fifo_reg, *pdata++); 2410 buf = pdata; 2411 } 2412 /* put anything remaining in the part_buf */ 2413 if (cnt) { 2414 dw_mci_set_part_bytes(host, buf, cnt); 2415 /* Push data if we have reached the expected data length */ 2416 if ((data->bytes_xfered + init_cnt) == 2417 (data->blksz * data->blocks)) 2418 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2419 } 2420 } 2421 2422 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2423 { 2424 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2425 if (unlikely((unsigned long)buf & 0x3)) { 2426 while (cnt >= 4) { 2427 /* pull data from fifo into aligned buffer */ 2428 u32 aligned_buf[32]; 2429 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2430 int items = len >> 2; 2431 int i; 2432 2433 for (i = 0; i < items; ++i) 2434 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 2435 /* memcpy from aligned buffer into output buffer */ 2436 memcpy(buf, aligned_buf, len); 2437 buf += len; 2438 cnt -= len; 2439 } 2440 } else 2441 #endif 2442 { 2443 u32 *pdata = buf; 2444 2445 for (; cnt >= 4; cnt -= 4) 2446 *pdata++ = mci_fifo_readl(host->fifo_reg); 2447 buf = pdata; 2448 } 2449 if (cnt) { 2450 host->part_buf32 = mci_fifo_readl(host->fifo_reg); 2451 dw_mci_pull_final_bytes(host, buf, cnt); 2452 } 2453 } 2454 2455 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2456 { 2457 struct mmc_data *data = host->data; 2458 int init_cnt = cnt; 2459 2460 /* try and push anything in the part_buf */ 2461 if (unlikely(host->part_buf_count)) { 2462 int len = dw_mci_push_part_bytes(host, buf, cnt); 2463 2464 buf += len; 2465 cnt -= len; 2466 2467 if (host->part_buf_count == 8) { 2468 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2469 host->part_buf_count = 0; 2470 } 2471 } 2472 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2473 if (unlikely((unsigned long)buf & 0x7)) { 2474 while (cnt >= 8) { 2475 u64 aligned_buf[16]; 2476 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2477 int items = len >> 3; 2478 int i; 2479 /* memcpy from input buffer into aligned buffer */ 2480 memcpy(aligned_buf, buf, len); 2481 buf += len; 2482 cnt -= len; 2483 /* push data from aligned buffer into fifo */ 2484 for (i = 0; i < items; ++i) 2485 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 2486 } 2487 } else 2488 #endif 2489 { 2490 u64 *pdata = buf; 2491 2492 for (; cnt >= 8; cnt -= 8) 2493 mci_fifo_writeq(host->fifo_reg, *pdata++); 2494 buf = pdata; 2495 } 2496 /* put anything remaining in the part_buf */ 2497 if (cnt) { 2498 dw_mci_set_part_bytes(host, buf, cnt); 2499 /* Push data if we have reached the expected data length */ 2500 if ((data->bytes_xfered + init_cnt) == 2501 (data->blksz * data->blocks)) 2502 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2503 } 2504 } 2505 2506 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2507 { 2508 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2509 if (unlikely((unsigned long)buf & 0x7)) { 2510 while (cnt >= 8) { 2511 /* pull data from fifo into aligned buffer */ 2512 u64 aligned_buf[16]; 2513 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2514 int items = len >> 3; 2515 int i; 2516 2517 for (i = 0; i < items; ++i) 2518 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 2519 2520 /* memcpy from aligned buffer into output buffer */ 2521 memcpy(buf, aligned_buf, len); 2522 buf += len; 2523 cnt -= len; 2524 } 2525 } else 2526 #endif 2527 { 2528 u64 *pdata = buf; 2529 2530 for (; cnt >= 8; cnt -= 8) 2531 *pdata++ = mci_fifo_readq(host->fifo_reg); 2532 buf = pdata; 2533 } 2534 if (cnt) { 2535 host->part_buf = mci_fifo_readq(host->fifo_reg); 2536 dw_mci_pull_final_bytes(host, buf, cnt); 2537 } 2538 } 2539 2540 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 2541 { 2542 int len; 2543 2544 /* get remaining partial bytes */ 2545 len = dw_mci_pull_part_bytes(host, buf, cnt); 2546 if (unlikely(len == cnt)) 2547 return; 2548 buf += len; 2549 cnt -= len; 2550 2551 /* get the rest of the data */ 2552 host->pull_data(host, buf, cnt); 2553 } 2554 2555 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2556 { 2557 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2558 void *buf; 2559 unsigned int offset; 2560 struct mmc_data *data = host->data; 2561 int shift = host->data_shift; 2562 u32 status; 2563 unsigned int len; 2564 unsigned int remain, fcnt; 2565 2566 do { 2567 if (!sg_miter_next(sg_miter)) 2568 goto done; 2569 2570 host->sg = sg_miter->piter.sg; 2571 buf = sg_miter->addr; 2572 remain = sg_miter->length; 2573 offset = 0; 2574 2575 do { 2576 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2577 << shift) + host->part_buf_count; 2578 len = min(remain, fcnt); 2579 if (!len) 2580 break; 2581 dw_mci_pull_data(host, (void *)(buf + offset), len); 2582 data->bytes_xfered += len; 2583 offset += len; 2584 remain -= len; 2585 } while (remain); 2586 2587 sg_miter->consumed = offset; 2588 status = mci_readl(host, MINTSTS); 2589 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2590 /* if the RXDR is ready read again */ 2591 } while ((status & SDMMC_INT_RXDR) || 2592 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2593 2594 if (!remain) { 2595 if (!sg_miter_next(sg_miter)) 2596 goto done; 2597 sg_miter->consumed = 0; 2598 } 2599 sg_miter_stop(sg_miter); 2600 return; 2601 2602 done: 2603 sg_miter_stop(sg_miter); 2604 host->sg = NULL; 2605 smp_wmb(); /* drain writebuffer */ 2606 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2607 } 2608 2609 static void dw_mci_write_data_pio(struct dw_mci *host) 2610 { 2611 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2612 void *buf; 2613 unsigned int offset; 2614 struct mmc_data *data = host->data; 2615 int shift = host->data_shift; 2616 u32 status; 2617 unsigned int len; 2618 unsigned int fifo_depth = host->fifo_depth; 2619 unsigned int remain, fcnt; 2620 2621 do { 2622 if (!sg_miter_next(sg_miter)) 2623 goto done; 2624 2625 host->sg = sg_miter->piter.sg; 2626 buf = sg_miter->addr; 2627 remain = sg_miter->length; 2628 offset = 0; 2629 2630 do { 2631 fcnt = ((fifo_depth - 2632 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2633 << shift) - host->part_buf_count; 2634 len = min(remain, fcnt); 2635 if (!len) 2636 break; 2637 host->push_data(host, (void *)(buf + offset), len); 2638 data->bytes_xfered += len; 2639 offset += len; 2640 remain -= len; 2641 } while (remain); 2642 2643 sg_miter->consumed = offset; 2644 status = mci_readl(host, MINTSTS); 2645 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2646 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 2647 2648 if (!remain) { 2649 if (!sg_miter_next(sg_miter)) 2650 goto done; 2651 sg_miter->consumed = 0; 2652 } 2653 sg_miter_stop(sg_miter); 2654 return; 2655 2656 done: 2657 sg_miter_stop(sg_miter); 2658 host->sg = NULL; 2659 smp_wmb(); /* drain writebuffer */ 2660 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2661 } 2662 2663 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2664 { 2665 del_timer(&host->cto_timer); 2666 2667 if (!host->cmd_status) 2668 host->cmd_status = status; 2669 2670 smp_wmb(); /* drain writebuffer */ 2671 2672 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2673 tasklet_schedule(&host->tasklet); 2674 2675 dw_mci_start_fault_timer(host); 2676 } 2677 2678 static void dw_mci_handle_cd(struct dw_mci *host) 2679 { 2680 struct dw_mci_slot *slot = host->slot; 2681 2682 mmc_detect_change(slot->mmc, 2683 msecs_to_jiffies(host->pdata->detect_delay_ms)); 2684 } 2685 2686 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2687 { 2688 struct dw_mci *host = dev_id; 2689 u32 pending; 2690 struct dw_mci_slot *slot = host->slot; 2691 2692 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2693 2694 if (pending) { 2695 /* Check volt switch first, since it can look like an error */ 2696 if ((host->state == STATE_SENDING_CMD11) && 2697 (pending & SDMMC_INT_VOLT_SWITCH)) { 2698 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 2699 pending &= ~SDMMC_INT_VOLT_SWITCH; 2700 2701 /* 2702 * Hold the lock; we know cmd11_timer can't be kicked 2703 * off after the lock is released, so safe to delete. 2704 */ 2705 spin_lock(&host->irq_lock); 2706 dw_mci_cmd_interrupt(host, pending); 2707 spin_unlock(&host->irq_lock); 2708 2709 del_timer(&host->cmd11_timer); 2710 } 2711 2712 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2713 spin_lock(&host->irq_lock); 2714 2715 del_timer(&host->cto_timer); 2716 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2717 host->cmd_status = pending; 2718 smp_wmb(); /* drain writebuffer */ 2719 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2720 2721 spin_unlock(&host->irq_lock); 2722 } 2723 2724 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 2725 spin_lock(&host->irq_lock); 2726 2727 /* if there is an error report DATA_ERROR */ 2728 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2729 host->data_status = pending; 2730 smp_wmb(); /* drain writebuffer */ 2731 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2732 tasklet_schedule(&host->tasklet); 2733 2734 spin_unlock(&host->irq_lock); 2735 } 2736 2737 if (pending & SDMMC_INT_DATA_OVER) { 2738 spin_lock(&host->irq_lock); 2739 2740 del_timer(&host->dto_timer); 2741 2742 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2743 if (!host->data_status) 2744 host->data_status = pending; 2745 smp_wmb(); /* drain writebuffer */ 2746 if (host->dir_status == DW_MCI_RECV_STATUS) { 2747 if (host->sg != NULL) 2748 dw_mci_read_data_pio(host, true); 2749 } 2750 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2751 tasklet_schedule(&host->tasklet); 2752 2753 spin_unlock(&host->irq_lock); 2754 } 2755 2756 if (pending & SDMMC_INT_RXDR) { 2757 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2758 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 2759 dw_mci_read_data_pio(host, false); 2760 } 2761 2762 if (pending & SDMMC_INT_TXDR) { 2763 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2764 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2765 dw_mci_write_data_pio(host); 2766 } 2767 2768 if (pending & SDMMC_INT_CMD_DONE) { 2769 spin_lock(&host->irq_lock); 2770 2771 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2772 dw_mci_cmd_interrupt(host, pending); 2773 2774 spin_unlock(&host->irq_lock); 2775 } 2776 2777 if (pending & SDMMC_INT_CD) { 2778 mci_writel(host, RINTSTS, SDMMC_INT_CD); 2779 dw_mci_handle_cd(host); 2780 } 2781 2782 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 2783 mci_writel(host, RINTSTS, 2784 SDMMC_INT_SDIO(slot->sdio_id)); 2785 __dw_mci_enable_sdio_irq(slot, 0); 2786 sdio_signal_irq(slot->mmc); 2787 } 2788 2789 } 2790 2791 if (host->use_dma != TRANS_MODE_IDMAC) 2792 return IRQ_HANDLED; 2793 2794 /* Handle IDMA interrupts */ 2795 if (host->dma_64bit_address == 1) { 2796 pending = mci_readl(host, IDSTS64); 2797 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2798 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2799 SDMMC_IDMAC_INT_RI); 2800 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2801 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2802 host->dma_ops->complete((void *)host); 2803 } 2804 } else { 2805 pending = mci_readl(host, IDSTS); 2806 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2807 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2808 SDMMC_IDMAC_INT_RI); 2809 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2810 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2811 host->dma_ops->complete((void *)host); 2812 } 2813 } 2814 2815 return IRQ_HANDLED; 2816 } 2817 2818 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) 2819 { 2820 struct dw_mci *host = slot->host; 2821 const struct dw_mci_drv_data *drv_data = host->drv_data; 2822 struct mmc_host *mmc = slot->mmc; 2823 int ctrl_id; 2824 2825 if (host->pdata->caps) 2826 mmc->caps = host->pdata->caps; 2827 2828 if (host->pdata->pm_caps) 2829 mmc->pm_caps = host->pdata->pm_caps; 2830 2831 if (host->dev->of_node) { 2832 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2833 if (ctrl_id < 0) 2834 ctrl_id = 0; 2835 } else { 2836 ctrl_id = to_platform_device(host->dev)->id; 2837 } 2838 2839 if (drv_data && drv_data->caps) { 2840 if (ctrl_id >= drv_data->num_caps) { 2841 dev_err(host->dev, "invalid controller id %d\n", 2842 ctrl_id); 2843 return -EINVAL; 2844 } 2845 mmc->caps |= drv_data->caps[ctrl_id]; 2846 } 2847 2848 if (host->pdata->caps2) 2849 mmc->caps2 = host->pdata->caps2; 2850 2851 mmc->f_min = DW_MCI_FREQ_MIN; 2852 if (!mmc->f_max) 2853 mmc->f_max = DW_MCI_FREQ_MAX; 2854 2855 /* Process SDIO IRQs through the sdio_irq_work. */ 2856 if (mmc->caps & MMC_CAP_SDIO_IRQ) 2857 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2858 2859 return 0; 2860 } 2861 2862 static int dw_mci_init_slot(struct dw_mci *host) 2863 { 2864 struct mmc_host *mmc; 2865 struct dw_mci_slot *slot; 2866 int ret; 2867 2868 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2869 if (!mmc) 2870 return -ENOMEM; 2871 2872 slot = mmc_priv(mmc); 2873 slot->id = 0; 2874 slot->sdio_id = host->sdio_id0 + slot->id; 2875 slot->mmc = mmc; 2876 slot->host = host; 2877 host->slot = slot; 2878 2879 mmc->ops = &dw_mci_ops; 2880 2881 /*if there are external regulators, get them*/ 2882 ret = mmc_regulator_get_supply(mmc); 2883 if (ret) 2884 goto err_host_allocated; 2885 2886 if (!mmc->ocr_avail) 2887 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2888 2889 ret = mmc_of_parse(mmc); 2890 if (ret) 2891 goto err_host_allocated; 2892 2893 ret = dw_mci_init_slot_caps(slot); 2894 if (ret) 2895 goto err_host_allocated; 2896 2897 /* Useful defaults if platform data is unset. */ 2898 if (host->use_dma == TRANS_MODE_IDMAC) { 2899 mmc->max_segs = host->ring_size; 2900 mmc->max_blk_size = 65535; 2901 mmc->max_seg_size = 0x1000; 2902 mmc->max_req_size = mmc->max_seg_size * host->ring_size; 2903 mmc->max_blk_count = mmc->max_req_size / 512; 2904 } else if (host->use_dma == TRANS_MODE_EDMAC) { 2905 mmc->max_segs = 64; 2906 mmc->max_blk_size = 65535; 2907 mmc->max_blk_count = 65535; 2908 mmc->max_req_size = 2909 mmc->max_blk_size * mmc->max_blk_count; 2910 mmc->max_seg_size = mmc->max_req_size; 2911 } else { 2912 /* TRANS_MODE_PIO */ 2913 mmc->max_segs = 64; 2914 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ 2915 mmc->max_blk_count = 512; 2916 mmc->max_req_size = mmc->max_blk_size * 2917 mmc->max_blk_count; 2918 mmc->max_seg_size = mmc->max_req_size; 2919 } 2920 2921 dw_mci_get_cd(mmc); 2922 2923 ret = mmc_add_host(mmc); 2924 if (ret) 2925 goto err_host_allocated; 2926 2927 #if defined(CONFIG_DEBUG_FS) 2928 dw_mci_init_debugfs(slot); 2929 #endif 2930 2931 return 0; 2932 2933 err_host_allocated: 2934 mmc_free_host(mmc); 2935 return ret; 2936 } 2937 2938 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot) 2939 { 2940 /* Debugfs stuff is cleaned up by mmc core */ 2941 mmc_remove_host(slot->mmc); 2942 slot->host->slot = NULL; 2943 mmc_free_host(slot->mmc); 2944 } 2945 2946 static void dw_mci_init_dma(struct dw_mci *host) 2947 { 2948 int addr_config; 2949 struct device *dev = host->dev; 2950 2951 /* 2952 * Check tansfer mode from HCON[17:16] 2953 * Clear the ambiguous description of dw_mmc databook: 2954 * 2b'00: No DMA Interface -> Actually means using Internal DMA block 2955 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block 2956 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block 2957 * 2b'11: Non DW DMA Interface -> pio only 2958 * Compared to DesignWare DMA Interface, Generic DMA Interface has a 2959 * simpler request/acknowledge handshake mechanism and both of them 2960 * are regarded as external dma master for dw_mmc. 2961 */ 2962 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 2963 if (host->use_dma == DMA_INTERFACE_IDMA) { 2964 host->use_dma = TRANS_MODE_IDMAC; 2965 } else if (host->use_dma == DMA_INTERFACE_DWDMA || 2966 host->use_dma == DMA_INTERFACE_GDMA) { 2967 host->use_dma = TRANS_MODE_EDMAC; 2968 } else { 2969 goto no_dma; 2970 } 2971 2972 /* Determine which DMA interface to use */ 2973 if (host->use_dma == TRANS_MODE_IDMAC) { 2974 /* 2975 * Check ADDR_CONFIG bit in HCON to find 2976 * IDMAC address bus width 2977 */ 2978 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 2979 2980 if (addr_config == 1) { 2981 /* host supports IDMAC in 64-bit address mode */ 2982 host->dma_64bit_address = 1; 2983 dev_info(host->dev, 2984 "IDMAC supports 64-bit address mode.\n"); 2985 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 2986 dma_set_coherent_mask(host->dev, 2987 DMA_BIT_MASK(64)); 2988 } else { 2989 /* host supports IDMAC in 32-bit address mode */ 2990 host->dma_64bit_address = 0; 2991 dev_info(host->dev, 2992 "IDMAC supports 32-bit address mode.\n"); 2993 } 2994 2995 /* Alloc memory for sg translation */ 2996 host->sg_cpu = dmam_alloc_coherent(host->dev, 2997 DESC_RING_BUF_SZ, 2998 &host->sg_dma, GFP_KERNEL); 2999 if (!host->sg_cpu) { 3000 dev_err(host->dev, 3001 "%s: could not alloc DMA memory\n", 3002 __func__); 3003 goto no_dma; 3004 } 3005 3006 host->dma_ops = &dw_mci_idmac_ops; 3007 dev_info(host->dev, "Using internal DMA controller.\n"); 3008 } else { 3009 /* TRANS_MODE_EDMAC: check dma bindings again */ 3010 if ((device_property_read_string_array(dev, "dma-names", 3011 NULL, 0) < 0) || 3012 !device_property_present(dev, "dmas")) { 3013 goto no_dma; 3014 } 3015 host->dma_ops = &dw_mci_edmac_ops; 3016 dev_info(host->dev, "Using external DMA controller.\n"); 3017 } 3018 3019 if (host->dma_ops->init && host->dma_ops->start && 3020 host->dma_ops->stop && host->dma_ops->cleanup) { 3021 if (host->dma_ops->init(host)) { 3022 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 3023 __func__); 3024 goto no_dma; 3025 } 3026 } else { 3027 dev_err(host->dev, "DMA initialization not found.\n"); 3028 goto no_dma; 3029 } 3030 3031 return; 3032 3033 no_dma: 3034 dev_info(host->dev, "Using PIO mode.\n"); 3035 host->use_dma = TRANS_MODE_PIO; 3036 } 3037 3038 static void dw_mci_cmd11_timer(struct timer_list *t) 3039 { 3040 struct dw_mci *host = from_timer(host, t, cmd11_timer); 3041 3042 if (host->state != STATE_SENDING_CMD11) { 3043 dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 3044 return; 3045 } 3046 3047 host->cmd_status = SDMMC_INT_RTO; 3048 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 3049 tasklet_schedule(&host->tasklet); 3050 } 3051 3052 static void dw_mci_cto_timer(struct timer_list *t) 3053 { 3054 struct dw_mci *host = from_timer(host, t, cto_timer); 3055 unsigned long irqflags; 3056 u32 pending; 3057 3058 spin_lock_irqsave(&host->irq_lock, irqflags); 3059 3060 /* 3061 * If somehow we have very bad interrupt latency it's remotely possible 3062 * that the timer could fire while the interrupt is still pending or 3063 * while the interrupt is midway through running. Let's be paranoid 3064 * and detect those two cases. Note that this is paranoia is somewhat 3065 * justified because in this function we don't actually cancel the 3066 * pending command in the controller--we just assume it will never come. 3067 */ 3068 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 3069 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { 3070 /* The interrupt should fire; no need to act but we can warn */ 3071 dev_warn(host->dev, "Unexpected interrupt latency\n"); 3072 goto exit; 3073 } 3074 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { 3075 /* Presumably interrupt handler couldn't delete the timer */ 3076 dev_warn(host->dev, "CTO timeout when already completed\n"); 3077 goto exit; 3078 } 3079 3080 /* 3081 * Continued paranoia to make sure we're in the state we expect. 3082 * This paranoia isn't really justified but it seems good to be safe. 3083 */ 3084 switch (host->state) { 3085 case STATE_SENDING_CMD11: 3086 case STATE_SENDING_CMD: 3087 case STATE_SENDING_STOP: 3088 /* 3089 * If CMD_DONE interrupt does NOT come in sending command 3090 * state, we should notify the driver to terminate current 3091 * transfer and report a command timeout to the core. 3092 */ 3093 host->cmd_status = SDMMC_INT_RTO; 3094 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 3095 tasklet_schedule(&host->tasklet); 3096 break; 3097 default: 3098 dev_warn(host->dev, "Unexpected command timeout, state %d\n", 3099 host->state); 3100 break; 3101 } 3102 3103 exit: 3104 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3105 } 3106 3107 static void dw_mci_dto_timer(struct timer_list *t) 3108 { 3109 struct dw_mci *host = from_timer(host, t, dto_timer); 3110 unsigned long irqflags; 3111 u32 pending; 3112 3113 spin_lock_irqsave(&host->irq_lock, irqflags); 3114 3115 /* 3116 * The DTO timer is much longer than the CTO timer, so it's even less 3117 * likely that we'll these cases, but it pays to be paranoid. 3118 */ 3119 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 3120 if (pending & SDMMC_INT_DATA_OVER) { 3121 /* The interrupt should fire; no need to act but we can warn */ 3122 dev_warn(host->dev, "Unexpected data interrupt latency\n"); 3123 goto exit; 3124 } 3125 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { 3126 /* Presumably interrupt handler couldn't delete the timer */ 3127 dev_warn(host->dev, "DTO timeout when already completed\n"); 3128 goto exit; 3129 } 3130 3131 /* 3132 * Continued paranoia to make sure we're in the state we expect. 3133 * This paranoia isn't really justified but it seems good to be safe. 3134 */ 3135 switch (host->state) { 3136 case STATE_SENDING_DATA: 3137 case STATE_DATA_BUSY: 3138 /* 3139 * If DTO interrupt does NOT come in sending data state, 3140 * we should notify the driver to terminate current transfer 3141 * and report a data timeout to the core. 3142 */ 3143 host->data_status = SDMMC_INT_DRTO; 3144 set_bit(EVENT_DATA_ERROR, &host->pending_events); 3145 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 3146 tasklet_schedule(&host->tasklet); 3147 break; 3148 default: 3149 dev_warn(host->dev, "Unexpected data timeout, state %d\n", 3150 host->state); 3151 break; 3152 } 3153 3154 exit: 3155 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3156 } 3157 3158 #ifdef CONFIG_OF 3159 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3160 { 3161 struct dw_mci_board *pdata; 3162 struct device *dev = host->dev; 3163 const struct dw_mci_drv_data *drv_data = host->drv_data; 3164 int ret; 3165 u32 clock_frequency; 3166 3167 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 3168 if (!pdata) 3169 return ERR_PTR(-ENOMEM); 3170 3171 /* find reset controller when exist */ 3172 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset"); 3173 if (IS_ERR(pdata->rstc)) 3174 return ERR_CAST(pdata->rstc); 3175 3176 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) 3177 dev_info(dev, 3178 "fifo-depth property not found, using value of FIFOTH register as default\n"); 3179 3180 device_property_read_u32(dev, "card-detect-delay", 3181 &pdata->detect_delay_ms); 3182 3183 device_property_read_u32(dev, "data-addr", &host->data_addr_override); 3184 3185 if (device_property_present(dev, "fifo-watermark-aligned")) 3186 host->wm_aligned = true; 3187 3188 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency)) 3189 pdata->bus_hz = clock_frequency; 3190 3191 if (drv_data && drv_data->parse_dt) { 3192 ret = drv_data->parse_dt(host); 3193 if (ret) 3194 return ERR_PTR(ret); 3195 } 3196 3197 return pdata; 3198 } 3199 3200 #else /* CONFIG_OF */ 3201 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3202 { 3203 return ERR_PTR(-EINVAL); 3204 } 3205 #endif /* CONFIG_OF */ 3206 3207 static void dw_mci_enable_cd(struct dw_mci *host) 3208 { 3209 unsigned long irqflags; 3210 u32 temp; 3211 3212 /* 3213 * No need for CD if all slots have a non-error GPIO 3214 * as well as broken card detection is found. 3215 */ 3216 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL) 3217 return; 3218 3219 if (mmc_gpio_get_cd(host->slot->mmc) < 0) { 3220 spin_lock_irqsave(&host->irq_lock, irqflags); 3221 temp = mci_readl(host, INTMASK); 3222 temp |= SDMMC_INT_CD; 3223 mci_writel(host, INTMASK, temp); 3224 spin_unlock_irqrestore(&host->irq_lock, irqflags); 3225 } 3226 } 3227 3228 int dw_mci_probe(struct dw_mci *host) 3229 { 3230 const struct dw_mci_drv_data *drv_data = host->drv_data; 3231 int width, i, ret = 0; 3232 u32 fifo_size; 3233 3234 if (!host->pdata) { 3235 host->pdata = dw_mci_parse_dt(host); 3236 if (IS_ERR(host->pdata)) 3237 return dev_err_probe(host->dev, PTR_ERR(host->pdata), 3238 "platform data not available\n"); 3239 } 3240 3241 host->biu_clk = devm_clk_get(host->dev, "biu"); 3242 if (IS_ERR(host->biu_clk)) { 3243 dev_dbg(host->dev, "biu clock not available\n"); 3244 } else { 3245 ret = clk_prepare_enable(host->biu_clk); 3246 if (ret) { 3247 dev_err(host->dev, "failed to enable biu clock\n"); 3248 return ret; 3249 } 3250 } 3251 3252 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3253 if (IS_ERR(host->ciu_clk)) { 3254 dev_dbg(host->dev, "ciu clock not available\n"); 3255 host->bus_hz = host->pdata->bus_hz; 3256 } else { 3257 ret = clk_prepare_enable(host->ciu_clk); 3258 if (ret) { 3259 dev_err(host->dev, "failed to enable ciu clock\n"); 3260 goto err_clk_biu; 3261 } 3262 3263 if (host->pdata->bus_hz) { 3264 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 3265 if (ret) 3266 dev_warn(host->dev, 3267 "Unable to set bus rate to %uHz\n", 3268 host->pdata->bus_hz); 3269 } 3270 host->bus_hz = clk_get_rate(host->ciu_clk); 3271 } 3272 3273 if (!host->bus_hz) { 3274 dev_err(host->dev, 3275 "Platform data must supply bus speed\n"); 3276 ret = -ENODEV; 3277 goto err_clk_ciu; 3278 } 3279 3280 if (host->pdata->rstc) { 3281 reset_control_assert(host->pdata->rstc); 3282 usleep_range(10, 50); 3283 reset_control_deassert(host->pdata->rstc); 3284 } 3285 3286 if (drv_data && drv_data->init) { 3287 ret = drv_data->init(host); 3288 if (ret) { 3289 dev_err(host->dev, 3290 "implementation specific init failed\n"); 3291 goto err_clk_ciu; 3292 } 3293 } 3294 3295 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); 3296 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); 3297 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); 3298 3299 spin_lock_init(&host->lock); 3300 spin_lock_init(&host->irq_lock); 3301 INIT_LIST_HEAD(&host->queue); 3302 3303 dw_mci_init_fault(host); 3304 3305 /* 3306 * Get the host data width - this assumes that HCON has been set with 3307 * the correct values. 3308 */ 3309 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3310 if (!i) { 3311 host->push_data = dw_mci_push_data16; 3312 host->pull_data = dw_mci_pull_data16; 3313 width = 16; 3314 host->data_shift = 1; 3315 } else if (i == 2) { 3316 host->push_data = dw_mci_push_data64; 3317 host->pull_data = dw_mci_pull_data64; 3318 width = 64; 3319 host->data_shift = 3; 3320 } else { 3321 /* Check for a reserved value, and warn if it is */ 3322 WARN((i != 1), 3323 "HCON reports a reserved host data width!\n" 3324 "Defaulting to 32-bit access.\n"); 3325 host->push_data = dw_mci_push_data32; 3326 host->pull_data = dw_mci_pull_data32; 3327 width = 32; 3328 host->data_shift = 2; 3329 } 3330 3331 /* Reset all blocks */ 3332 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3333 ret = -ENODEV; 3334 goto err_clk_ciu; 3335 } 3336 3337 host->dma_ops = host->pdata->dma_ops; 3338 dw_mci_init_dma(host); 3339 3340 /* Clear the interrupts for the host controller */ 3341 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3342 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3343 3344 /* Put in max timeout */ 3345 mci_writel(host, TMOUT, 0xFFFFFFFF); 3346 3347 /* 3348 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 3349 * Tx Mark = fifo_size / 2 DMA Size = 8 3350 */ 3351 if (!host->pdata->fifo_depth) { 3352 /* 3353 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3354 * have been overwritten by the bootloader, just like we're 3355 * about to do, so if you know the value for your hardware, you 3356 * should put it in the platform data. 3357 */ 3358 fifo_size = mci_readl(host, FIFOTH); 3359 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3360 } else { 3361 fifo_size = host->pdata->fifo_depth; 3362 } 3363 host->fifo_depth = fifo_size; 3364 host->fifoth_val = 3365 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3366 mci_writel(host, FIFOTH, host->fifoth_val); 3367 3368 /* disable clock to CIU */ 3369 mci_writel(host, CLKENA, 0); 3370 mci_writel(host, CLKSRC, 0); 3371 3372 /* 3373 * In 2.40a spec, Data offset is changed. 3374 * Need to check the version-id and set data-offset for DATA register. 3375 */ 3376 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 3377 dev_info(host->dev, "Version ID is %04x\n", host->verid); 3378 3379 if (host->data_addr_override) 3380 host->fifo_reg = host->regs + host->data_addr_override; 3381 else if (host->verid < DW_MMC_240A) 3382 host->fifo_reg = host->regs + DATA_OFFSET; 3383 else 3384 host->fifo_reg = host->regs + DATA_240A_OFFSET; 3385 3386 tasklet_setup(&host->tasklet, dw_mci_tasklet_func); 3387 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3388 host->irq_flags, "dw-mci", host); 3389 if (ret) 3390 goto err_dmaunmap; 3391 3392 /* 3393 * Enable interrupts for command done, data over, data empty, 3394 * receive ready and error such as transmit, receive timeout, crc error 3395 */ 3396 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3397 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3398 DW_MCI_ERROR_FLAGS); 3399 /* Enable mci interrupt */ 3400 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3401 3402 dev_info(host->dev, 3403 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 3404 host->irq, width, fifo_size); 3405 3406 /* We need at least one slot to succeed */ 3407 ret = dw_mci_init_slot(host); 3408 if (ret) { 3409 dev_dbg(host->dev, "slot %d init failed\n", i); 3410 goto err_dmaunmap; 3411 } 3412 3413 /* Now that slots are all setup, we can enable card detect */ 3414 dw_mci_enable_cd(host); 3415 3416 return 0; 3417 3418 err_dmaunmap: 3419 if (host->use_dma && host->dma_ops->exit) 3420 host->dma_ops->exit(host); 3421 3422 reset_control_assert(host->pdata->rstc); 3423 3424 err_clk_ciu: 3425 clk_disable_unprepare(host->ciu_clk); 3426 3427 err_clk_biu: 3428 clk_disable_unprepare(host->biu_clk); 3429 3430 return ret; 3431 } 3432 EXPORT_SYMBOL(dw_mci_probe); 3433 3434 void dw_mci_remove(struct dw_mci *host) 3435 { 3436 dev_dbg(host->dev, "remove slot\n"); 3437 if (host->slot) 3438 dw_mci_cleanup_slot(host->slot); 3439 3440 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3441 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3442 3443 /* disable clock to CIU */ 3444 mci_writel(host, CLKENA, 0); 3445 mci_writel(host, CLKSRC, 0); 3446 3447 if (host->use_dma && host->dma_ops->exit) 3448 host->dma_ops->exit(host); 3449 3450 reset_control_assert(host->pdata->rstc); 3451 3452 clk_disable_unprepare(host->ciu_clk); 3453 clk_disable_unprepare(host->biu_clk); 3454 } 3455 EXPORT_SYMBOL(dw_mci_remove); 3456 3457 3458 3459 #ifdef CONFIG_PM 3460 int dw_mci_runtime_suspend(struct device *dev) 3461 { 3462 struct dw_mci *host = dev_get_drvdata(dev); 3463 3464 if (host->use_dma && host->dma_ops->exit) 3465 host->dma_ops->exit(host); 3466 3467 clk_disable_unprepare(host->ciu_clk); 3468 3469 if (host->slot && 3470 (mmc_can_gpio_cd(host->slot->mmc) || 3471 !mmc_card_is_removable(host->slot->mmc))) 3472 clk_disable_unprepare(host->biu_clk); 3473 3474 return 0; 3475 } 3476 EXPORT_SYMBOL(dw_mci_runtime_suspend); 3477 3478 int dw_mci_runtime_resume(struct device *dev) 3479 { 3480 int ret = 0; 3481 struct dw_mci *host = dev_get_drvdata(dev); 3482 3483 if (host->slot && 3484 (mmc_can_gpio_cd(host->slot->mmc) || 3485 !mmc_card_is_removable(host->slot->mmc))) { 3486 ret = clk_prepare_enable(host->biu_clk); 3487 if (ret) 3488 return ret; 3489 } 3490 3491 ret = clk_prepare_enable(host->ciu_clk); 3492 if (ret) 3493 goto err; 3494 3495 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3496 clk_disable_unprepare(host->ciu_clk); 3497 ret = -ENODEV; 3498 goto err; 3499 } 3500 3501 if (host->use_dma && host->dma_ops->init) 3502 host->dma_ops->init(host); 3503 3504 /* 3505 * Restore the initial value at FIFOTH register 3506 * And Invalidate the prev_blksz with zero 3507 */ 3508 mci_writel(host, FIFOTH, host->fifoth_val); 3509 host->prev_blksz = 0; 3510 3511 /* Put in max timeout */ 3512 mci_writel(host, TMOUT, 0xFFFFFFFF); 3513 3514 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3515 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3516 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3517 DW_MCI_ERROR_FLAGS); 3518 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3519 3520 3521 if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER) 3522 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios); 3523 3524 /* Force setup bus to guarantee available clock output */ 3525 dw_mci_setup_bus(host->slot, true); 3526 3527 /* Re-enable SDIO interrupts. */ 3528 if (sdio_irq_claimed(host->slot->mmc)) 3529 __dw_mci_enable_sdio_irq(host->slot, 1); 3530 3531 /* Now that slots are all setup, we can enable card detect */ 3532 dw_mci_enable_cd(host); 3533 3534 return 0; 3535 3536 err: 3537 if (host->slot && 3538 (mmc_can_gpio_cd(host->slot->mmc) || 3539 !mmc_card_is_removable(host->slot->mmc))) 3540 clk_disable_unprepare(host->biu_clk); 3541 3542 return ret; 3543 } 3544 EXPORT_SYMBOL(dw_mci_runtime_resume); 3545 #endif /* CONFIG_PM */ 3546 3547 static int __init dw_mci_init(void) 3548 { 3549 pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 3550 return 0; 3551 } 3552 3553 static void __exit dw_mci_exit(void) 3554 { 3555 } 3556 3557 module_init(dw_mci_init); 3558 module_exit(dw_mci_exit); 3559 3560 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3561 MODULE_AUTHOR("NXP Semiconductor VietNam"); 3562 MODULE_AUTHOR("Imagination Technologies Ltd"); 3563 MODULE_LICENSE("GPL v2"); 3564