1 /* 2 * Synopsys DesignWare Multimedia Card Interface driver 3 * (Based on NXP driver for lpc 31xx) 4 * 5 * Copyright (C) 2009 NXP Semiconductors 6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 */ 13 14 #include <linux/blkdev.h> 15 #include <linux/clk.h> 16 #include <linux/debugfs.h> 17 #include <linux/device.h> 18 #include <linux/dma-mapping.h> 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/interrupt.h> 22 #include <linux/ioport.h> 23 #include <linux/module.h> 24 #include <linux/platform_device.h> 25 #include <linux/seq_file.h> 26 #include <linux/slab.h> 27 #include <linux/stat.h> 28 #include <linux/delay.h> 29 #include <linux/irq.h> 30 #include <linux/mmc/card.h> 31 #include <linux/mmc/host.h> 32 #include <linux/mmc/mmc.h> 33 #include <linux/mmc/sd.h> 34 #include <linux/mmc/sdio.h> 35 #include <linux/mmc/dw_mmc.h> 36 #include <linux/bitops.h> 37 #include <linux/regulator/consumer.h> 38 #include <linux/of.h> 39 #include <linux/of_gpio.h> 40 #include <linux/mmc/slot-gpio.h> 41 42 #include "dw_mmc.h" 43 44 /* Common flag combinations */ 45 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 46 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 47 SDMMC_INT_EBE) 48 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 49 SDMMC_INT_RESP_ERR) 50 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 51 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) 52 #define DW_MCI_SEND_STATUS 1 53 #define DW_MCI_RECV_STATUS 2 54 #define DW_MCI_DMA_THRESHOLD 16 55 56 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 57 #define DW_MCI_FREQ_MIN 400000 /* unit: HZ */ 58 59 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 62 SDMMC_IDMAC_INT_TI) 63 64 struct idmac_desc_64addr { 65 u32 des0; /* Control Descriptor */ 66 67 u32 des1; /* Reserved */ 68 69 u32 des2; /*Buffer sizes */ 70 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ 71 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ 72 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) 73 74 u32 des3; /* Reserved */ 75 76 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/ 77 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ 78 79 u32 des6; /* Lower 32-bits of Next Descriptor Address */ 80 u32 des7; /* Upper 32-bits of Next Descriptor Address */ 81 }; 82 83 struct idmac_desc { 84 __le32 des0; /* Control Descriptor */ 85 #define IDMAC_DES0_DIC BIT(1) 86 #define IDMAC_DES0_LD BIT(2) 87 #define IDMAC_DES0_FD BIT(3) 88 #define IDMAC_DES0_CH BIT(4) 89 #define IDMAC_DES0_ER BIT(5) 90 #define IDMAC_DES0_CES BIT(30) 91 #define IDMAC_DES0_OWN BIT(31) 92 93 __le32 des1; /* Buffer sizes */ 94 #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 95 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) 96 97 __le32 des2; /* buffer 1 physical address */ 98 99 __le32 des3; /* buffer 2 physical address */ 100 }; 101 102 /* Each descriptor can transfer up to 4KB of data in chained mode */ 103 #define DW_MCI_DESC_DATA_LENGTH 0x1000 104 105 static bool dw_mci_reset(struct dw_mci *host); 106 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset); 107 static int dw_mci_card_busy(struct mmc_host *mmc); 108 109 #if defined(CONFIG_DEBUG_FS) 110 static int 
dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	/* Dump the live register contents, not the register offset constants */
	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if
(cmd->opcode == MMC_STOP_TRANSMISSION || 243 cmd->opcode == MMC_GO_IDLE_STATE || 244 cmd->opcode == MMC_GO_INACTIVE_STATE || 245 (cmd->opcode == SD_IO_RW_DIRECT && 246 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 247 cmdr |= SDMMC_CMD_STOP; 248 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 249 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 250 251 if (cmd->opcode == SD_SWITCH_VOLTAGE) { 252 u32 clk_en_a; 253 254 /* Special bit makes CMD11 not die */ 255 cmdr |= SDMMC_CMD_VOLT_SWITCH; 256 257 /* Change state to continue to handle CMD11 weirdness */ 258 WARN_ON(slot->host->state != STATE_SENDING_CMD); 259 slot->host->state = STATE_SENDING_CMD11; 260 261 /* 262 * We need to disable low power mode (automatic clock stop) 263 * while doing voltage switch so we don't confuse the card, 264 * since stopping the clock is a specific part of the UHS 265 * voltage change dance. 266 * 267 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be 268 * unconditionally turned back on in dw_mci_setup_bus() if it's 269 * ever called with a non-zero clock. That shouldn't happen 270 * until the voltage change is all done. 271 */ 272 clk_en_a = mci_readl(host, CLKENA); 273 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); 274 mci_writel(host, CLKENA, clk_en_a); 275 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 276 SDMMC_CMD_PRV_DAT_WAIT, 0); 277 } 278 279 if (cmd->flags & MMC_RSP_PRESENT) { 280 /* We expect a response, so set this bit */ 281 cmdr |= SDMMC_CMD_RESP_EXP; 282 if (cmd->flags & MMC_RSP_136) 283 cmdr |= SDMMC_CMD_RESP_LONG; 284 } 285 286 if (cmd->flags & MMC_RSP_CRC) 287 cmdr |= SDMMC_CMD_RESP_CRC; 288 289 data = cmd->data; 290 if (data) { 291 cmdr |= SDMMC_CMD_DAT_EXP; 292 if (data->flags & MMC_DATA_WRITE) 293 cmdr |= SDMMC_CMD_DAT_WR; 294 } 295 296 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) 297 cmdr |= SDMMC_CMD_USE_HOLD_REG; 298 299 return cmdr; 300 } 301 302 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) 303 { 304 struct mmc_command *stop; 305 u32 cmdr; 306 307 if (!cmd->data) 308 return 0; 309 310 stop = &host->stop_abort; 311 cmdr = cmd->opcode; 312 memset(stop, 0, sizeof(struct mmc_command)); 313 314 if (cmdr == MMC_READ_SINGLE_BLOCK || 315 cmdr == MMC_READ_MULTIPLE_BLOCK || 316 cmdr == MMC_WRITE_BLOCK || 317 cmdr == MMC_WRITE_MULTIPLE_BLOCK || 318 cmdr == MMC_SEND_TUNING_BLOCK || 319 cmdr == MMC_SEND_TUNING_BLOCK_HS200) { 320 stop->opcode = MMC_STOP_TRANSMISSION; 321 stop->arg = 0; 322 stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 323 } else if (cmdr == SD_IO_RW_EXTENDED) { 324 stop->opcode = SD_IO_RW_DIRECT; 325 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | 326 ((cmd->arg >> 28) & 0x7); 327 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; 328 } else { 329 return 0; 330 } 331 332 cmdr = stop->opcode | SDMMC_CMD_STOP | 333 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; 334 335 return cmdr; 336 } 337 338 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 339 { 340 unsigned long timeout = jiffies + msecs_to_jiffies(500); 341 342 /* 343 * Databook says that before issuing a new data transfer command 344 * we need to check to see if the card is busy. Data transfer commands 345 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 346 * 347 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 348 * expected. 
349 */ 350 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 351 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 352 while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) { 353 if (time_after(jiffies, timeout)) { 354 /* Command will fail; we'll pass error then */ 355 dev_err(host->dev, "Busy; trying anyway\n"); 356 break; 357 } 358 udelay(10); 359 } 360 } 361 } 362 363 static void dw_mci_start_command(struct dw_mci *host, 364 struct mmc_command *cmd, u32 cmd_flags) 365 { 366 host->cmd = cmd; 367 dev_vdbg(host->dev, 368 "start command: ARGR=0x%08x CMDR=0x%08x\n", 369 cmd->arg, cmd_flags); 370 371 mci_writel(host, CMDARG, cmd->arg); 372 wmb(); /* drain writebuffer */ 373 dw_mci_wait_while_busy(host, cmd_flags); 374 375 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 376 } 377 378 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 379 { 380 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort; 381 382 dw_mci_start_command(host, stop, host->stop_cmdr); 383 } 384 385 /* DMA interface functions */ 386 static void dw_mci_stop_dma(struct dw_mci *host) 387 { 388 if (host->using_dma) { 389 host->dma_ops->stop(host); 390 host->dma_ops->cleanup(host); 391 } 392 393 /* Data transfer was stopped by the interrupt handler */ 394 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 395 } 396 397 static int dw_mci_get_dma_dir(struct mmc_data *data) 398 { 399 if (data->flags & MMC_DATA_WRITE) 400 return DMA_TO_DEVICE; 401 else 402 return DMA_FROM_DEVICE; 403 } 404 405 static void dw_mci_dma_cleanup(struct dw_mci *host) 406 { 407 struct mmc_data *data = host->data; 408 409 if (data) 410 if (!data->host_cookie) 411 dma_unmap_sg(host->dev, 412 data->sg, 413 data->sg_len, 414 dw_mci_get_dma_dir(data)); 415 } 416 417 static void dw_mci_idmac_reset(struct dw_mci *host) 418 { 419 u32 bmod = mci_readl(host, BMOD); 420 /* Software reset of DMA */ 421 bmod |= SDMMC_IDMAC_SWRESET; 422 mci_writel(host, BMOD, bmod); 423 } 424 425 static void dw_mci_idmac_stop_dma(struct dw_mci *host) 426 { 427 u32 temp; 428 429 /* Disable and reset the IDMAC interface */ 430 temp = mci_readl(host, CTRL); 431 temp &= ~SDMMC_CTRL_USE_IDMAC; 432 temp |= SDMMC_CTRL_DMA_RESET; 433 mci_writel(host, CTRL, temp); 434 435 /* Stop the IDMAC running */ 436 temp = mci_readl(host, BMOD); 437 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 438 temp |= SDMMC_IDMAC_SWRESET; 439 mci_writel(host, BMOD, temp); 440 } 441 442 static void dw_mci_dmac_complete_dma(void *arg) 443 { 444 struct dw_mci *host = arg; 445 struct mmc_data *data = host->data; 446 447 dev_vdbg(host->dev, "DMA complete\n"); 448 449 if ((host->use_dma == TRANS_MODE_EDMAC) && 450 data && (data->flags & MMC_DATA_READ)) 451 /* Invalidate cache after read */ 452 dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc), 453 data->sg, 454 data->sg_len, 455 DMA_FROM_DEVICE); 456 457 host->dma_ops->cleanup(host); 458 459 /* 460 * If the card was removed, data will be NULL. No point in trying to 461 * send the stop command or waiting for NBUSY in this case. 
462 */ 463 if (data) { 464 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 465 tasklet_schedule(&host->tasklet); 466 } 467 } 468 469 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, 470 unsigned int sg_len) 471 { 472 unsigned int desc_len; 473 int i; 474 475 if (host->dma_64bit_address == 1) { 476 struct idmac_desc_64addr *desc_first, *desc_last, *desc; 477 478 desc_first = desc_last = desc = host->sg_cpu; 479 480 for (i = 0; i < sg_len; i++) { 481 unsigned int length = sg_dma_len(&data->sg[i]); 482 483 u64 mem_addr = sg_dma_address(&data->sg[i]); 484 485 for ( ; length ; desc++) { 486 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 487 length : DW_MCI_DESC_DATA_LENGTH; 488 489 length -= desc_len; 490 491 /* 492 * Set the OWN bit and disable interrupts 493 * for this descriptor 494 */ 495 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 496 IDMAC_DES0_CH; 497 498 /* Buffer length */ 499 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); 500 501 /* Physical address to DMA to/from */ 502 desc->des4 = mem_addr & 0xffffffff; 503 desc->des5 = mem_addr >> 32; 504 505 /* Update physical address for the next desc */ 506 mem_addr += desc_len; 507 508 /* Save pointer to the last descriptor */ 509 desc_last = desc; 510 } 511 } 512 513 /* Set first descriptor */ 514 desc_first->des0 |= IDMAC_DES0_FD; 515 516 /* Set last descriptor */ 517 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 518 desc_last->des0 |= IDMAC_DES0_LD; 519 520 } else { 521 struct idmac_desc *desc_first, *desc_last, *desc; 522 523 desc_first = desc_last = desc = host->sg_cpu; 524 525 for (i = 0; i < sg_len; i++) { 526 unsigned int length = sg_dma_len(&data->sg[i]); 527 528 u32 mem_addr = sg_dma_address(&data->sg[i]); 529 530 for ( ; length ; desc++) { 531 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 
532 length : DW_MCI_DESC_DATA_LENGTH; 533 534 length -= desc_len; 535 536 /* 537 * Set the OWN bit and disable interrupts 538 * for this descriptor 539 */ 540 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 541 IDMAC_DES0_DIC | 542 IDMAC_DES0_CH); 543 544 /* Buffer length */ 545 IDMAC_SET_BUFFER1_SIZE(desc, desc_len); 546 547 /* Physical address to DMA to/from */ 548 desc->des2 = cpu_to_le32(mem_addr); 549 550 /* Update physical address for the next desc */ 551 mem_addr += desc_len; 552 553 /* Save pointer to the last descriptor */ 554 desc_last = desc; 555 } 556 } 557 558 /* Set first descriptor */ 559 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); 560 561 /* Set last descriptor */ 562 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | 563 IDMAC_DES0_DIC)); 564 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); 565 } 566 567 wmb(); /* drain writebuffer */ 568 } 569 570 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 571 { 572 u32 temp; 573 574 dw_mci_translate_sglist(host, host->data, sg_len); 575 576 /* Make sure to reset DMA in case we did PIO before this */ 577 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); 578 dw_mci_idmac_reset(host); 579 580 /* Select IDMAC interface */ 581 temp = mci_readl(host, CTRL); 582 temp |= SDMMC_CTRL_USE_IDMAC; 583 mci_writel(host, CTRL, temp); 584 585 /* drain writebuffer */ 586 wmb(); 587 588 /* Enable the IDMAC */ 589 temp = mci_readl(host, BMOD); 590 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 591 mci_writel(host, BMOD, temp); 592 593 /* Start it running */ 594 mci_writel(host, PLDMND, 1); 595 596 return 0; 597 } 598 599 static int dw_mci_idmac_init(struct dw_mci *host) 600 { 601 int i; 602 603 if (host->dma_64bit_address == 1) { 604 struct idmac_desc_64addr *p; 605 /* Number of descriptors in the ring buffer */ 606 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr); 607 608 /* Forward link the descriptor list */ 609 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 610 i++, p++) { 611 p->des6 = (host->sg_dma + 612 (sizeof(struct idmac_desc_64addr) * 613 (i + 1))) & 0xffffffff; 614 615 p->des7 = (u64)(host->sg_dma + 616 (sizeof(struct idmac_desc_64addr) * 617 (i + 1))) >> 32; 618 /* Initialize reserved and buffer size fields to "0" */ 619 p->des1 = 0; 620 p->des2 = 0; 621 p->des3 = 0; 622 } 623 624 /* Set the last descriptor as the end-of-ring descriptor */ 625 p->des6 = host->sg_dma & 0xffffffff; 626 p->des7 = (u64)host->sg_dma >> 32; 627 p->des0 = IDMAC_DES0_ER; 628 629 } else { 630 struct idmac_desc *p; 631 /* Number of descriptors in the ring buffer */ 632 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 633 634 /* Forward link the descriptor list */ 635 for (i = 0, p = host->sg_cpu; 636 i < host->ring_size - 1; 637 i++, p++) { 638 p->des3 = cpu_to_le32(host->sg_dma + 639 (sizeof(struct idmac_desc) * (i + 1))); 640 p->des1 = 0; 641 } 642 643 /* Set the last descriptor as the end-of-ring descriptor */ 644 p->des3 = cpu_to_le32(host->sg_dma); 645 p->des0 = cpu_to_le32(IDMAC_DES0_ER); 646 } 647 648 dw_mci_idmac_reset(host); 649 650 if (host->dma_64bit_address == 1) { 651 /* Mask out interrupts - get Tx & Rx complete only */ 652 mci_writel(host, IDSTS64, IDMAC_INT_CLR); 653 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | 654 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 655 656 /* Set the descriptor base address */ 657 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); 658 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); 659 660 } else { 661 /* Mask out interrupts - get Tx & Rx complete only */ 662 
mci_writel(host, IDSTS, IDMAC_INT_CLR); 663 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | 664 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 665 666 /* Set the descriptor base address */ 667 mci_writel(host, DBADDR, host->sg_dma); 668 } 669 670 return 0; 671 } 672 673 static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 674 .init = dw_mci_idmac_init, 675 .start = dw_mci_idmac_start_dma, 676 .stop = dw_mci_idmac_stop_dma, 677 .complete = dw_mci_dmac_complete_dma, 678 .cleanup = dw_mci_dma_cleanup, 679 }; 680 681 static void dw_mci_edmac_stop_dma(struct dw_mci *host) 682 { 683 dmaengine_terminate_async(host->dms->ch); 684 } 685 686 static int dw_mci_edmac_start_dma(struct dw_mci *host, 687 unsigned int sg_len) 688 { 689 struct dma_slave_config cfg; 690 struct dma_async_tx_descriptor *desc = NULL; 691 struct scatterlist *sgl = host->data->sg; 692 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 693 u32 sg_elems = host->data->sg_len; 694 u32 fifoth_val; 695 u32 fifo_offset = host->fifo_reg - host->regs; 696 int ret = 0; 697 698 /* Set external dma config: burst size, burst width */ 699 cfg.dst_addr = host->phy_regs + fifo_offset; 700 cfg.src_addr = cfg.dst_addr; 701 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 702 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 703 704 /* Match burst msize with external dma config */ 705 fifoth_val = mci_readl(host, FIFOTH); 706 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; 707 cfg.src_maxburst = cfg.dst_maxburst; 708 709 if (host->data->flags & MMC_DATA_WRITE) 710 cfg.direction = DMA_MEM_TO_DEV; 711 else 712 cfg.direction = DMA_DEV_TO_MEM; 713 714 ret = dmaengine_slave_config(host->dms->ch, &cfg); 715 if (ret) { 716 dev_err(host->dev, "Failed to config edmac.\n"); 717 return -EBUSY; 718 } 719 720 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, 721 sg_len, cfg.direction, 722 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 723 if (!desc) { 724 dev_err(host->dev, "Can't prepare slave sg.\n"); 725 return -EBUSY; 726 } 727 728 /* Set dw_mci_dmac_complete_dma as callback */ 729 desc->callback = dw_mci_dmac_complete_dma; 730 desc->callback_param = (void *)host; 731 dmaengine_submit(desc); 732 733 /* Flush cache before write */ 734 if (host->data->flags & MMC_DATA_WRITE) 735 dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl, 736 sg_elems, DMA_TO_DEVICE); 737 738 dma_async_issue_pending(host->dms->ch); 739 740 return 0; 741 } 742 743 static int dw_mci_edmac_init(struct dw_mci *host) 744 { 745 /* Request external dma channel */ 746 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); 747 if (!host->dms) 748 return -ENOMEM; 749 750 host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); 751 if (!host->dms->ch) { 752 dev_err(host->dev, "Failed to get external DMA channel.\n"); 753 kfree(host->dms); 754 host->dms = NULL; 755 return -ENXIO; 756 } 757 758 return 0; 759 } 760 761 static void dw_mci_edmac_exit(struct dw_mci *host) 762 { 763 if (host->dms) { 764 if (host->dms->ch) { 765 dma_release_channel(host->dms->ch); 766 host->dms->ch = NULL; 767 } 768 kfree(host->dms); 769 host->dms = NULL; 770 } 771 } 772 773 static const struct dw_mci_dma_ops dw_mci_edmac_ops = { 774 .init = dw_mci_edmac_init, 775 .exit = dw_mci_edmac_exit, 776 .start = dw_mci_edmac_start_dma, 777 .stop = dw_mci_edmac_stop_dma, 778 .complete = dw_mci_dmac_complete_dma, 779 .cleanup = dw_mci_dma_cleanup, 780 }; 781 782 static int dw_mci_pre_dma_transfer(struct dw_mci *host, 783 struct mmc_data *data, 784 bool next) 785 { 786 struct scatterlist *sg; 787 unsigned int i, 
sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
912 */ 913 if (host->verid < DW_MMC_240A) 914 return; 915 916 if (host->timing != MMC_TIMING_MMC_HS200 && 917 host->timing != MMC_TIMING_MMC_HS400 && 918 host->timing != MMC_TIMING_UHS_SDR104) 919 goto disable; 920 921 blksz_depth = blksz / (1 << host->data_shift); 922 fifo_depth = host->fifo_depth; 923 924 if (blksz_depth > fifo_depth) 925 goto disable; 926 927 /* 928 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' 929 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz 930 * Currently just choose blksz. 931 */ 932 thld_size = blksz; 933 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1)); 934 return; 935 936 disable: 937 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0)); 938 } 939 940 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 941 { 942 unsigned long irqflags; 943 int sg_len; 944 u32 temp; 945 946 host->using_dma = 0; 947 948 /* If we don't have a channel, we can't do DMA */ 949 if (!host->use_dma) 950 return -ENODEV; 951 952 sg_len = dw_mci_pre_dma_transfer(host, data, 0); 953 if (sg_len < 0) { 954 host->dma_ops->stop(host); 955 return sg_len; 956 } 957 958 host->using_dma = 1; 959 960 if (host->use_dma == TRANS_MODE_IDMAC) 961 dev_vdbg(host->dev, 962 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 963 (unsigned long)host->sg_cpu, 964 (unsigned long)host->sg_dma, 965 sg_len); 966 967 /* 968 * Decide the MSIZE and RX/TX Watermark. 969 * If current block size is same with previous size, 970 * no need to update fifoth. 971 */ 972 if (host->prev_blksz != data->blksz) 973 dw_mci_adjust_fifoth(host, data); 974 975 /* Enable the DMA interface */ 976 temp = mci_readl(host, CTRL); 977 temp |= SDMMC_CTRL_DMA_ENABLE; 978 mci_writel(host, CTRL, temp); 979 980 /* Disable RX/TX IRQs, let DMA handle it */ 981 spin_lock_irqsave(&host->irq_lock, irqflags); 982 temp = mci_readl(host, INTMASK); 983 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 984 mci_writel(host, INTMASK, temp); 985 spin_unlock_irqrestore(&host->irq_lock, irqflags); 986 987 if (host->dma_ops->start(host, sg_len)) { 988 /* We can't do DMA */ 989 dev_err(host->dev, "%s: failed to start DMA.\n", __func__); 990 return -ENODEV; 991 } 992 993 return 0; 994 } 995 996 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 997 { 998 unsigned long irqflags; 999 int flags = SG_MITER_ATOMIC; 1000 u32 temp; 1001 1002 data->error = -EINPROGRESS; 1003 1004 WARN_ON(host->data); 1005 host->sg = NULL; 1006 host->data = data; 1007 1008 if (data->flags & MMC_DATA_READ) { 1009 host->dir_status = DW_MCI_RECV_STATUS; 1010 dw_mci_ctrl_rd_thld(host, data); 1011 } else { 1012 host->dir_status = DW_MCI_SEND_STATUS; 1013 } 1014 1015 if (dw_mci_submit_data_dma(host, data)) { 1016 if (host->data->flags & MMC_DATA_READ) 1017 flags |= SG_MITER_TO_SG; 1018 else 1019 flags |= SG_MITER_FROM_SG; 1020 1021 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1022 host->sg = data->sg; 1023 host->part_buf_start = 0; 1024 host->part_buf_count = 0; 1025 1026 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 1027 1028 spin_lock_irqsave(&host->irq_lock, irqflags); 1029 temp = mci_readl(host, INTMASK); 1030 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 1031 mci_writel(host, INTMASK, temp); 1032 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1033 1034 temp = mci_readl(host, CTRL); 1035 temp &= ~SDMMC_CTRL_DMA_ENABLE; 1036 mci_writel(host, CTRL, temp); 1037 1038 /* 1039 * Use the initial fifoth_val for PIO mode. 
	 * If next issued data may be transferred by DMA mode,
	 * prev_blksz should be invalidated.
	 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ?
((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock value reflecting the clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ?
mrq->sbc : mrq->cmd; 1213 __dw_mci_start_request(host, slot, cmd); 1214 } 1215 1216 /* must be called with host->lock held */ 1217 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 1218 struct mmc_request *mrq) 1219 { 1220 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 1221 host->state); 1222 1223 slot->mrq = mrq; 1224 1225 if (host->state == STATE_WAITING_CMD11_DONE) { 1226 dev_warn(&slot->mmc->class_dev, 1227 "Voltage change didn't complete\n"); 1228 /* 1229 * this case isn't expected to happen, so we can 1230 * either crash here or just try to continue on 1231 * in the closest possible state 1232 */ 1233 host->state = STATE_IDLE; 1234 } 1235 1236 if (host->state == STATE_IDLE) { 1237 host->state = STATE_SENDING_CMD; 1238 dw_mci_start_request(host, slot); 1239 } else { 1240 list_add_tail(&slot->queue_node, &host->queue); 1241 } 1242 } 1243 1244 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1245 { 1246 struct dw_mci_slot *slot = mmc_priv(mmc); 1247 struct dw_mci *host = slot->host; 1248 1249 WARN_ON(slot->mrq); 1250 1251 /* 1252 * The check for card presence and queueing of the request must be 1253 * atomic, otherwise the card could be removed in between and the 1254 * request wouldn't fail until another card was inserted. 1255 */ 1256 spin_lock_bh(&host->lock); 1257 1258 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 1259 spin_unlock_bh(&host->lock); 1260 mrq->cmd->error = -ENOMEDIUM; 1261 mmc_request_done(mmc, mrq); 1262 return; 1263 } 1264 1265 dw_mci_queue_request(host, slot, mrq); 1266 1267 spin_unlock_bh(&host->lock); 1268 } 1269 1270 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1271 { 1272 struct dw_mci_slot *slot = mmc_priv(mmc); 1273 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 1274 u32 regs; 1275 int ret; 1276 1277 switch (ios->bus_width) { 1278 case MMC_BUS_WIDTH_4: 1279 slot->ctype = SDMMC_CTYPE_4BIT; 1280 break; 1281 case MMC_BUS_WIDTH_8: 1282 slot->ctype = SDMMC_CTYPE_8BIT; 1283 break; 1284 default: 1285 /* set default 1 bit mode */ 1286 slot->ctype = SDMMC_CTYPE_1BIT; 1287 } 1288 1289 regs = mci_readl(slot->host, UHS_REG); 1290 1291 /* DDR mode set */ 1292 if (ios->timing == MMC_TIMING_MMC_DDR52 || 1293 ios->timing == MMC_TIMING_UHS_DDR50 || 1294 ios->timing == MMC_TIMING_MMC_HS400) 1295 regs |= ((0x1 << slot->id) << 16); 1296 else 1297 regs &= ~((0x1 << slot->id) << 16); 1298 1299 mci_writel(slot->host, UHS_REG, regs); 1300 slot->host->timing = ios->timing; 1301 1302 /* 1303 * Use mirror of ios->clock to prevent race with mmc 1304 * core ios update when finding the minimum. 
1305 */ 1306 slot->clock = ios->clock; 1307 1308 if (drv_data && drv_data->set_ios) 1309 drv_data->set_ios(slot->host, ios); 1310 1311 switch (ios->power_mode) { 1312 case MMC_POWER_UP: 1313 if (!IS_ERR(mmc->supply.vmmc)) { 1314 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 1315 ios->vdd); 1316 if (ret) { 1317 dev_err(slot->host->dev, 1318 "failed to enable vmmc regulator\n"); 1319 /*return, if failed turn on vmmc*/ 1320 return; 1321 } 1322 } 1323 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 1324 regs = mci_readl(slot->host, PWREN); 1325 regs |= (1 << slot->id); 1326 mci_writel(slot->host, PWREN, regs); 1327 break; 1328 case MMC_POWER_ON: 1329 if (!slot->host->vqmmc_enabled) { 1330 if (!IS_ERR(mmc->supply.vqmmc)) { 1331 ret = regulator_enable(mmc->supply.vqmmc); 1332 if (ret < 0) 1333 dev_err(slot->host->dev, 1334 "failed to enable vqmmc\n"); 1335 else 1336 slot->host->vqmmc_enabled = true; 1337 1338 } else { 1339 /* Keep track so we don't reset again */ 1340 slot->host->vqmmc_enabled = true; 1341 } 1342 1343 /* Reset our state machine after powering on */ 1344 dw_mci_ctrl_reset(slot->host, 1345 SDMMC_CTRL_ALL_RESET_FLAGS); 1346 } 1347 1348 /* Adjust clock / bus width after power is up */ 1349 dw_mci_setup_bus(slot, false); 1350 1351 break; 1352 case MMC_POWER_OFF: 1353 /* Turn clock off before power goes down */ 1354 dw_mci_setup_bus(slot, false); 1355 1356 if (!IS_ERR(mmc->supply.vmmc)) 1357 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1358 1359 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) 1360 regulator_disable(mmc->supply.vqmmc); 1361 slot->host->vqmmc_enabled = false; 1362 1363 regs = mci_readl(slot->host, PWREN); 1364 regs &= ~(1 << slot->id); 1365 mci_writel(slot->host, PWREN, regs); 1366 break; 1367 default: 1368 break; 1369 } 1370 1371 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) 1372 slot->host->state = STATE_IDLE; 1373 } 1374 1375 static int dw_mci_card_busy(struct mmc_host *mmc) 1376 { 1377 struct dw_mci_slot *slot = mmc_priv(mmc); 1378 u32 status; 1379 1380 /* 1381 * Check the busy bit which is low when DAT[3:0] 1382 * (the data lines) are 0000 1383 */ 1384 status = mci_readl(slot->host, STATUS); 1385 1386 return !!(status & SDMMC_STATUS_BUSY); 1387 } 1388 1389 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) 1390 { 1391 struct dw_mci_slot *slot = mmc_priv(mmc); 1392 struct dw_mci *host = slot->host; 1393 const struct dw_mci_drv_data *drv_data = host->drv_data; 1394 u32 uhs; 1395 u32 v18 = SDMMC_UHS_18V << slot->id; 1396 int ret; 1397 1398 if (drv_data && drv_data->switch_voltage) 1399 return drv_data->switch_voltage(mmc, ios); 1400 1401 /* 1402 * Program the voltage. Note that some instances of dw_mmc may use 1403 * the UHS_REG for this. For other instances (like exynos) the UHS_REG 1404 * does no harm but you need to set the regulator directly. Try both. 1405 */ 1406 uhs = mci_readl(host, UHS_REG); 1407 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1408 uhs &= ~v18; 1409 else 1410 uhs |= v18; 1411 1412 if (!IS_ERR(mmc->supply.vqmmc)) { 1413 ret = mmc_regulator_set_vqmmc(mmc, ios); 1414 1415 if (ret) { 1416 dev_dbg(&mmc->class_dev, 1417 "Regulator set error %d - %s V\n", 1418 ret, uhs & v18 ? 
"1.8" : "3.3"); 1419 return ret; 1420 } 1421 } 1422 mci_writel(host, UHS_REG, uhs); 1423 1424 return 0; 1425 } 1426 1427 static int dw_mci_get_ro(struct mmc_host *mmc) 1428 { 1429 int read_only; 1430 struct dw_mci_slot *slot = mmc_priv(mmc); 1431 int gpio_ro = mmc_gpio_get_ro(mmc); 1432 1433 /* Use platform get_ro function, else try on board write protect */ 1434 if (gpio_ro >= 0) 1435 read_only = gpio_ro; 1436 else 1437 read_only = 1438 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 1439 1440 dev_dbg(&mmc->class_dev, "card is %s\n", 1441 read_only ? "read-only" : "read-write"); 1442 1443 return read_only; 1444 } 1445 1446 static int dw_mci_get_cd(struct mmc_host *mmc) 1447 { 1448 int present; 1449 struct dw_mci_slot *slot = mmc_priv(mmc); 1450 struct dw_mci *host = slot->host; 1451 int gpio_cd = mmc_gpio_get_cd(mmc); 1452 1453 /* Use platform get_cd function, else try onboard card detect */ 1454 if ((mmc->caps & MMC_CAP_NEEDS_POLL) || 1455 (mmc->caps & MMC_CAP_NONREMOVABLE)) 1456 present = 1; 1457 else if (gpio_cd >= 0) 1458 present = gpio_cd; 1459 else 1460 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 1461 == 0 ? 1 : 0; 1462 1463 spin_lock_bh(&host->lock); 1464 if (present) { 1465 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1466 dev_dbg(&mmc->class_dev, "card is present\n"); 1467 } else { 1468 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1469 dev_dbg(&mmc->class_dev, "card is not present\n"); 1470 } 1471 spin_unlock_bh(&host->lock); 1472 1473 return present; 1474 } 1475 1476 static void dw_mci_hw_reset(struct mmc_host *mmc) 1477 { 1478 struct dw_mci_slot *slot = mmc_priv(mmc); 1479 struct dw_mci *host = slot->host; 1480 int reset; 1481 1482 if (host->use_dma == TRANS_MODE_IDMAC) 1483 dw_mci_idmac_reset(host); 1484 1485 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET | 1486 SDMMC_CTRL_FIFO_RESET)) 1487 return; 1488 1489 /* 1490 * According to eMMC spec, card reset procedure: 1491 * tRstW >= 1us: RST_n pulse width 1492 * tRSCA >= 200us: RST_n to Command time 1493 * tRSTH >= 1us: RST_n high period 1494 */ 1495 reset = mci_readl(host, RST_N); 1496 reset &= ~(SDMMC_RST_HWACTIVE << slot->id); 1497 mci_writel(host, RST_N, reset); 1498 usleep_range(1, 2); 1499 reset |= SDMMC_RST_HWACTIVE << slot->id; 1500 mci_writel(host, RST_N, reset); 1501 usleep_range(200, 300); 1502 } 1503 1504 static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card) 1505 { 1506 struct dw_mci_slot *slot = mmc_priv(mmc); 1507 struct dw_mci *host = slot->host; 1508 1509 /* 1510 * Low power mode will stop the card clock when idle. According to the 1511 * description of the CLKENA register we should disable low power mode 1512 * for SDIO cards if we need SDIO interrupts to work. 
1513 */ 1514 if (mmc->caps & MMC_CAP_SDIO_IRQ) { 1515 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; 1516 u32 clk_en_a_old; 1517 u32 clk_en_a; 1518 1519 clk_en_a_old = mci_readl(host, CLKENA); 1520 1521 if (card->type == MMC_TYPE_SDIO || 1522 card->type == MMC_TYPE_SD_COMBO) { 1523 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1524 clk_en_a = clk_en_a_old & ~clken_low_pwr; 1525 } else { 1526 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1527 clk_en_a = clk_en_a_old | clken_low_pwr; 1528 } 1529 1530 if (clk_en_a != clk_en_a_old) { 1531 mci_writel(host, CLKENA, clk_en_a); 1532 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 1533 SDMMC_CMD_PRV_DAT_WAIT, 0); 1534 } 1535 } 1536 } 1537 1538 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 1539 { 1540 struct dw_mci_slot *slot = mmc_priv(mmc); 1541 struct dw_mci *host = slot->host; 1542 unsigned long irqflags; 1543 u32 int_mask; 1544 1545 spin_lock_irqsave(&host->irq_lock, irqflags); 1546 1547 /* Enable/disable Slot Specific SDIO interrupt */ 1548 int_mask = mci_readl(host, INTMASK); 1549 if (enb) 1550 int_mask |= SDMMC_INT_SDIO(slot->sdio_id); 1551 else 1552 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id); 1553 mci_writel(host, INTMASK, int_mask); 1554 1555 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1556 } 1557 1558 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1559 { 1560 struct dw_mci_slot *slot = mmc_priv(mmc); 1561 struct dw_mci *host = slot->host; 1562 const struct dw_mci_drv_data *drv_data = host->drv_data; 1563 int err = -EINVAL; 1564 1565 if (drv_data && drv_data->execute_tuning) 1566 err = drv_data->execute_tuning(slot, opcode); 1567 return err; 1568 } 1569 1570 static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, 1571 struct mmc_ios *ios) 1572 { 1573 struct dw_mci_slot *slot = mmc_priv(mmc); 1574 struct dw_mci *host = slot->host; 1575 const struct dw_mci_drv_data *drv_data = host->drv_data; 1576 1577 if (drv_data && drv_data->prepare_hs400_tuning) 1578 return drv_data->prepare_hs400_tuning(host, ios); 1579 1580 return 0; 1581 } 1582 1583 static const struct mmc_host_ops dw_mci_ops = { 1584 .request = dw_mci_request, 1585 .pre_req = dw_mci_pre_req, 1586 .post_req = dw_mci_post_req, 1587 .set_ios = dw_mci_set_ios, 1588 .get_ro = dw_mci_get_ro, 1589 .get_cd = dw_mci_get_cd, 1590 .hw_reset = dw_mci_hw_reset, 1591 .enable_sdio_irq = dw_mci_enable_sdio_irq, 1592 .execute_tuning = dw_mci_execute_tuning, 1593 .card_busy = dw_mci_card_busy, 1594 .start_signal_voltage_switch = dw_mci_switch_voltage, 1595 .init_card = dw_mci_init_card, 1596 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, 1597 }; 1598 1599 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 1600 __releases(&host->lock) 1601 __acquires(&host->lock) 1602 { 1603 struct dw_mci_slot *slot; 1604 struct mmc_host *prev_mmc = host->cur_slot->mmc; 1605 1606 WARN_ON(host->cmd || host->data); 1607 1608 host->cur_slot->mrq = NULL; 1609 host->mrq = NULL; 1610 if (!list_empty(&host->queue)) { 1611 slot = list_entry(host->queue.next, 1612 struct dw_mci_slot, queue_node); 1613 list_del(&slot->queue_node); 1614 dev_vdbg(host->dev, "list not empty: %s is next\n", 1615 mmc_hostname(slot->mmc)); 1616 host->state = STATE_SENDING_CMD; 1617 dw_mci_start_request(host, slot); 1618 } else { 1619 dev_vdbg(host->dev, "list empty\n"); 1620 1621 if (host->state == STATE_SENDING_CMD11) 1622 host->state = STATE_WAITING_CMD11_DONE; 1623 else 1624 host->state = STATE_IDLE; 1625 } 1626 1627 spin_unlock(&host->lock); 1628 
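	/*
	 * Note: host->lock is dropped around mmc_request_done() because the
	 * core may call back into this driver from the completion path (e.g.
	 * to retry a failed command), and dw_mci_request() takes host->lock
	 * itself.
	 */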
mmc_request_done(prev_mmc, mrq); 1629 spin_lock(&host->lock); 1630 } 1631 1632 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 1633 { 1634 u32 status = host->cmd_status; 1635 1636 host->cmd_status = 0; 1637 1638 /* Read the response from the card (up to 16 bytes) */ 1639 if (cmd->flags & MMC_RSP_PRESENT) { 1640 if (cmd->flags & MMC_RSP_136) { 1641 cmd->resp[3] = mci_readl(host, RESP0); 1642 cmd->resp[2] = mci_readl(host, RESP1); 1643 cmd->resp[1] = mci_readl(host, RESP2); 1644 cmd->resp[0] = mci_readl(host, RESP3); 1645 } else { 1646 cmd->resp[0] = mci_readl(host, RESP0); 1647 cmd->resp[1] = 0; 1648 cmd->resp[2] = 0; 1649 cmd->resp[3] = 0; 1650 } 1651 } 1652 1653 if (status & SDMMC_INT_RTO) 1654 cmd->error = -ETIMEDOUT; 1655 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 1656 cmd->error = -EILSEQ; 1657 else if (status & SDMMC_INT_RESP_ERR) 1658 cmd->error = -EIO; 1659 else 1660 cmd->error = 0; 1661 1662 return cmd->error; 1663 } 1664 1665 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) 1666 { 1667 u32 status = host->data_status; 1668 1669 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1670 if (status & SDMMC_INT_DRTO) { 1671 data->error = -ETIMEDOUT; 1672 } else if (status & SDMMC_INT_DCRC) { 1673 data->error = -EILSEQ; 1674 } else if (status & SDMMC_INT_EBE) { 1675 if (host->dir_status == 1676 DW_MCI_SEND_STATUS) { 1677 /* 1678 * No data CRC status was returned. 1679 * The number of bytes transferred 1680 * will be exaggerated in PIO mode. 1681 */ 1682 data->bytes_xfered = 0; 1683 data->error = -ETIMEDOUT; 1684 } else if (host->dir_status == 1685 DW_MCI_RECV_STATUS) { 1686 data->error = -EIO; 1687 } 1688 } else { 1689 /* SDMMC_INT_SBE is included */ 1690 data->error = -EIO; 1691 } 1692 1693 dev_dbg(host->dev, "data error, status 0x%08x\n", status); 1694 1695 /* 1696 * After an error, there may be data lingering 1697 * in the FIFO 1698 */ 1699 dw_mci_reset(host); 1700 } else { 1701 data->bytes_xfered = data->blocks * data->blksz; 1702 data->error = 0; 1703 } 1704 1705 return data->error; 1706 } 1707 1708 static void dw_mci_set_drto(struct dw_mci *host) 1709 { 1710 unsigned int drto_clks; 1711 unsigned int drto_ms; 1712 1713 drto_clks = mci_readl(host, TMOUT) >> 8; 1714 drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000); 1715 1716 /* add a bit spare time */ 1717 drto_ms += 10; 1718 1719 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms)); 1720 } 1721 1722 static void dw_mci_tasklet_func(unsigned long priv) 1723 { 1724 struct dw_mci *host = (struct dw_mci *)priv; 1725 struct mmc_data *data; 1726 struct mmc_command *cmd; 1727 struct mmc_request *mrq; 1728 enum dw_mci_state state; 1729 enum dw_mci_state prev_state; 1730 unsigned int err; 1731 1732 spin_lock(&host->lock); 1733 1734 state = host->state; 1735 data = host->data; 1736 mrq = host->mrq; 1737 1738 do { 1739 prev_state = state; 1740 1741 switch (state) { 1742 case STATE_IDLE: 1743 case STATE_WAITING_CMD11_DONE: 1744 break; 1745 1746 case STATE_SENDING_CMD11: 1747 case STATE_SENDING_CMD: 1748 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1749 &host->pending_events)) 1750 break; 1751 1752 cmd = host->cmd; 1753 host->cmd = NULL; 1754 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 1755 err = dw_mci_command_complete(host, cmd); 1756 if (cmd == mrq->sbc && !err) { 1757 prev_state = state = STATE_SENDING_CMD; 1758 __dw_mci_start_request(host, host->cur_slot, 1759 mrq->cmd); 1760 goto unlock; 1761 } 1762 1763 if (cmd->data && err) { 1764 
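				/*
				 * The command part of a data transfer failed:
				 * stop any DMA that is in flight and issue a
				 * stop/abort command so the card returns to a
				 * known state.
				 */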
dw_mci_stop_dma(host); 1765 send_stop_abort(host, data); 1766 state = STATE_SENDING_STOP; 1767 break; 1768 } 1769 1770 if (!cmd->data || err) { 1771 dw_mci_request_end(host, mrq); 1772 goto unlock; 1773 } 1774 1775 prev_state = state = STATE_SENDING_DATA; 1776 /* fall through */ 1777 1778 case STATE_SENDING_DATA: 1779 /* 1780 * We could get a data error and never a transfer 1781 * complete so we'd better check for it here. 1782 * 1783 * Note that we don't really care if we also got a 1784 * transfer complete; stopping the DMA and sending an 1785 * abort won't hurt. 1786 */ 1787 if (test_and_clear_bit(EVENT_DATA_ERROR, 1788 &host->pending_events)) { 1789 dw_mci_stop_dma(host); 1790 if (data->stop || 1791 !(host->data_status & (SDMMC_INT_DRTO | 1792 SDMMC_INT_EBE))) 1793 send_stop_abort(host, data); 1794 state = STATE_DATA_ERROR; 1795 break; 1796 } 1797 1798 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1799 &host->pending_events)) { 1800 /* 1801 * If all data-related interrupts don't come 1802 * within the given time in reading data state. 1803 */ 1804 if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) && 1805 (host->dir_status == DW_MCI_RECV_STATUS)) 1806 dw_mci_set_drto(host); 1807 break; 1808 } 1809 1810 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 1811 1812 /* 1813 * Handle an EVENT_DATA_ERROR that might have shown up 1814 * before the transfer completed. This might not have 1815 * been caught by the check above because the interrupt 1816 * could have gone off between the previous check and 1817 * the check for transfer complete. 1818 * 1819 * Technically this ought not be needed assuming we 1820 * get a DATA_COMPLETE eventually (we'll notice the 1821 * error and end the request), but it shouldn't hurt. 1822 * 1823 * This has the advantage of sending the stop command. 1824 */ 1825 if (test_and_clear_bit(EVENT_DATA_ERROR, 1826 &host->pending_events)) { 1827 dw_mci_stop_dma(host); 1828 if (data->stop || 1829 !(host->data_status & (SDMMC_INT_DRTO | 1830 SDMMC_INT_EBE))) 1831 send_stop_abort(host, data); 1832 state = STATE_DATA_ERROR; 1833 break; 1834 } 1835 prev_state = state = STATE_DATA_BUSY; 1836 1837 /* fall through */ 1838 1839 case STATE_DATA_BUSY: 1840 if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 1841 &host->pending_events)) { 1842 /* 1843 * If data error interrupt comes but data over 1844 * interrupt doesn't come within the given time. 1845 * in reading data state. 1846 */ 1847 if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) && 1848 (host->dir_status == DW_MCI_RECV_STATUS)) 1849 dw_mci_set_drto(host); 1850 break; 1851 } 1852 1853 host->data = NULL; 1854 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 1855 err = dw_mci_data_complete(host, data); 1856 1857 if (!err) { 1858 if (!data->stop || mrq->sbc) { 1859 if (mrq->sbc && data->stop) 1860 data->stop->error = 0; 1861 dw_mci_request_end(host, mrq); 1862 goto unlock; 1863 } 1864 1865 /* stop command for open-ended transfer*/ 1866 if (data->stop) 1867 send_stop_abort(host, data); 1868 } else { 1869 /* 1870 * If we don't have a command complete now we'll 1871 * never get one since we just reset everything; 1872 * better end the request. 1873 * 1874 * If we do have a command complete we'll fall 1875 * through to the SENDING_STOP command and 1876 * everything will be peachy keen. 
1877 */ 1878 if (!test_bit(EVENT_CMD_COMPLETE, 1879 &host->pending_events)) { 1880 host->cmd = NULL; 1881 dw_mci_request_end(host, mrq); 1882 goto unlock; 1883 } 1884 } 1885 1886 /* 1887 * If err has non-zero, 1888 * stop-abort command has been already issued. 1889 */ 1890 prev_state = state = STATE_SENDING_STOP; 1891 1892 /* fall through */ 1893 1894 case STATE_SENDING_STOP: 1895 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1896 &host->pending_events)) 1897 break; 1898 1899 /* CMD error in data command */ 1900 if (mrq->cmd->error && mrq->data) 1901 dw_mci_reset(host); 1902 1903 host->cmd = NULL; 1904 host->data = NULL; 1905 1906 if (mrq->stop) 1907 dw_mci_command_complete(host, mrq->stop); 1908 else 1909 host->cmd_status = 0; 1910 1911 dw_mci_request_end(host, mrq); 1912 goto unlock; 1913 1914 case STATE_DATA_ERROR: 1915 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1916 &host->pending_events)) 1917 break; 1918 1919 state = STATE_DATA_BUSY; 1920 break; 1921 } 1922 } while (state != prev_state); 1923 1924 host->state = state; 1925 unlock: 1926 spin_unlock(&host->lock); 1927 1928 } 1929 1930 /* push final bytes to part_buf, only use during push */ 1931 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 1932 { 1933 memcpy((void *)&host->part_buf, buf, cnt); 1934 host->part_buf_count = cnt; 1935 } 1936 1937 /* append bytes to part_buf, only use during push */ 1938 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 1939 { 1940 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 1941 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 1942 host->part_buf_count += cnt; 1943 return cnt; 1944 } 1945 1946 /* pull first bytes from part_buf, only use during pull */ 1947 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 1948 { 1949 cnt = min_t(int, cnt, host->part_buf_count); 1950 if (cnt) { 1951 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 1952 cnt); 1953 host->part_buf_count -= cnt; 1954 host->part_buf_start += cnt; 1955 } 1956 return cnt; 1957 } 1958 1959 /* pull final bytes from the part_buf, assuming it's just been filled */ 1960 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 1961 { 1962 memcpy(buf, &host->part_buf, cnt); 1963 host->part_buf_start = cnt; 1964 host->part_buf_count = (1 << host->data_shift) - cnt; 1965 } 1966 1967 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 1968 { 1969 struct mmc_data *data = host->data; 1970 int init_cnt = cnt; 1971 1972 /* try and push anything in the part_buf */ 1973 if (unlikely(host->part_buf_count)) { 1974 int len = dw_mci_push_part_bytes(host, buf, cnt); 1975 1976 buf += len; 1977 cnt -= len; 1978 if (host->part_buf_count == 2) { 1979 mci_fifo_writew(host->fifo_reg, host->part_buf16); 1980 host->part_buf_count = 0; 1981 } 1982 } 1983 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1984 if (unlikely((unsigned long)buf & 0x1)) { 1985 while (cnt >= 2) { 1986 u16 aligned_buf[64]; 1987 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1988 int items = len >> 1; 1989 int i; 1990 /* memcpy from input buffer into aligned buffer */ 1991 memcpy(aligned_buf, buf, len); 1992 buf += len; 1993 cnt -= len; 1994 /* push data from aligned buffer into fifo */ 1995 for (i = 0; i < items; ++i) 1996 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 1997 } 1998 } else 1999 #endif 2000 { 2001 u16 *pdata = buf; 2002 2003 for (; cnt >= 2; cnt -= 2) 2004 mci_fifo_writew(host->fifo_reg, *pdata++); 2005 buf = pdata; 
2006 } 2007 /* put anything remaining in the part_buf */ 2008 if (cnt) { 2009 dw_mci_set_part_bytes(host, buf, cnt); 2010 /* Push data if we have reached the expected data length */ 2011 if ((data->bytes_xfered + init_cnt) == 2012 (data->blksz * data->blocks)) 2013 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2014 } 2015 } 2016 2017 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2018 { 2019 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2020 if (unlikely((unsigned long)buf & 0x1)) { 2021 while (cnt >= 2) { 2022 /* pull data from fifo into aligned buffer */ 2023 u16 aligned_buf[64]; 2024 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2025 int items = len >> 1; 2026 int i; 2027 2028 for (i = 0; i < items; ++i) 2029 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 2030 /* memcpy from aligned buffer into output buffer */ 2031 memcpy(buf, aligned_buf, len); 2032 buf += len; 2033 cnt -= len; 2034 } 2035 } else 2036 #endif 2037 { 2038 u16 *pdata = buf; 2039 2040 for (; cnt >= 2; cnt -= 2) 2041 *pdata++ = mci_fifo_readw(host->fifo_reg); 2042 buf = pdata; 2043 } 2044 if (cnt) { 2045 host->part_buf16 = mci_fifo_readw(host->fifo_reg); 2046 dw_mci_pull_final_bytes(host, buf, cnt); 2047 } 2048 } 2049 2050 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2051 { 2052 struct mmc_data *data = host->data; 2053 int init_cnt = cnt; 2054 2055 /* try and push anything in the part_buf */ 2056 if (unlikely(host->part_buf_count)) { 2057 int len = dw_mci_push_part_bytes(host, buf, cnt); 2058 2059 buf += len; 2060 cnt -= len; 2061 if (host->part_buf_count == 4) { 2062 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2063 host->part_buf_count = 0; 2064 } 2065 } 2066 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2067 if (unlikely((unsigned long)buf & 0x3)) { 2068 while (cnt >= 4) { 2069 u32 aligned_buf[32]; 2070 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2071 int items = len >> 2; 2072 int i; 2073 /* memcpy from input buffer into aligned buffer */ 2074 memcpy(aligned_buf, buf, len); 2075 buf += len; 2076 cnt -= len; 2077 /* push data from aligned buffer into fifo */ 2078 for (i = 0; i < items; ++i) 2079 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 2080 } 2081 } else 2082 #endif 2083 { 2084 u32 *pdata = buf; 2085 2086 for (; cnt >= 4; cnt -= 4) 2087 mci_fifo_writel(host->fifo_reg, *pdata++); 2088 buf = pdata; 2089 } 2090 /* put anything remaining in the part_buf */ 2091 if (cnt) { 2092 dw_mci_set_part_bytes(host, buf, cnt); 2093 /* Push data if we have reached the expected data length */ 2094 if ((data->bytes_xfered + init_cnt) == 2095 (data->blksz * data->blocks)) 2096 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2097 } 2098 } 2099 2100 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2101 { 2102 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2103 if (unlikely((unsigned long)buf & 0x3)) { 2104 while (cnt >= 4) { 2105 /* pull data from fifo into aligned buffer */ 2106 u32 aligned_buf[32]; 2107 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2108 int items = len >> 2; 2109 int i; 2110 2111 for (i = 0; i < items; ++i) 2112 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 2113 /* memcpy from aligned buffer into output buffer */ 2114 memcpy(buf, aligned_buf, len); 2115 buf += len; 2116 cnt -= len; 2117 } 2118 } else 2119 #endif 2120 { 2121 u32 *pdata = buf; 2122 2123 for (; cnt >= 4; cnt -= 4) 2124 *pdata++ = mci_fifo_readl(host->fifo_reg); 2125 buf = pdata; 2126 } 2127 if (cnt) { 2128 host->part_buf32 = 
mci_fifo_readl(host->fifo_reg); 2129 dw_mci_pull_final_bytes(host, buf, cnt); 2130 } 2131 } 2132 2133 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2134 { 2135 struct mmc_data *data = host->data; 2136 int init_cnt = cnt; 2137 2138 /* try and push anything in the part_buf */ 2139 if (unlikely(host->part_buf_count)) { 2140 int len = dw_mci_push_part_bytes(host, buf, cnt); 2141 2142 buf += len; 2143 cnt -= len; 2144 2145 if (host->part_buf_count == 8) { 2146 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2147 host->part_buf_count = 0; 2148 } 2149 } 2150 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2151 if (unlikely((unsigned long)buf & 0x7)) { 2152 while (cnt >= 8) { 2153 u64 aligned_buf[16]; 2154 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2155 int items = len >> 3; 2156 int i; 2157 /* memcpy from input buffer into aligned buffer */ 2158 memcpy(aligned_buf, buf, len); 2159 buf += len; 2160 cnt -= len; 2161 /* push data from aligned buffer into fifo */ 2162 for (i = 0; i < items; ++i) 2163 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 2164 } 2165 } else 2166 #endif 2167 { 2168 u64 *pdata = buf; 2169 2170 for (; cnt >= 8; cnt -= 8) 2171 mci_fifo_writeq(host->fifo_reg, *pdata++); 2172 buf = pdata; 2173 } 2174 /* put anything remaining in the part_buf */ 2175 if (cnt) { 2176 dw_mci_set_part_bytes(host, buf, cnt); 2177 /* Push data if we have reached the expected data length */ 2178 if ((data->bytes_xfered + init_cnt) == 2179 (data->blksz * data->blocks)) 2180 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2181 } 2182 } 2183 2184 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2185 { 2186 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2187 if (unlikely((unsigned long)buf & 0x7)) { 2188 while (cnt >= 8) { 2189 /* pull data from fifo into aligned buffer */ 2190 u64 aligned_buf[16]; 2191 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2192 int items = len >> 3; 2193 int i; 2194 2195 for (i = 0; i < items; ++i) 2196 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 2197 2198 /* memcpy from aligned buffer into output buffer */ 2199 memcpy(buf, aligned_buf, len); 2200 buf += len; 2201 cnt -= len; 2202 } 2203 } else 2204 #endif 2205 { 2206 u64 *pdata = buf; 2207 2208 for (; cnt >= 8; cnt -= 8) 2209 *pdata++ = mci_fifo_readq(host->fifo_reg); 2210 buf = pdata; 2211 } 2212 if (cnt) { 2213 host->part_buf = mci_fifo_readq(host->fifo_reg); 2214 dw_mci_pull_final_bytes(host, buf, cnt); 2215 } 2216 } 2217 2218 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 2219 { 2220 int len; 2221 2222 /* get remaining partial bytes */ 2223 len = dw_mci_pull_part_bytes(host, buf, cnt); 2224 if (unlikely(len == cnt)) 2225 return; 2226 buf += len; 2227 cnt -= len; 2228 2229 /* get the rest of the data */ 2230 host->pull_data(host, buf, cnt); 2231 } 2232 2233 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2234 { 2235 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2236 void *buf; 2237 unsigned int offset; 2238 struct mmc_data *data = host->data; 2239 int shift = host->data_shift; 2240 u32 status; 2241 unsigned int len; 2242 unsigned int remain, fcnt; 2243 2244 do { 2245 if (!sg_miter_next(sg_miter)) 2246 goto done; 2247 2248 host->sg = sg_miter->piter.sg; 2249 buf = sg_miter->addr; 2250 remain = sg_miter->length; 2251 offset = 0; 2252 2253 do { 2254 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2255 << shift) + host->part_buf_count; 2256 len = min(remain, fcnt); 2257 if (!len) 2258 break; 2259 dw_mci_pull_data(host, 
(void *)(buf + offset), len); 2260 data->bytes_xfered += len; 2261 offset += len; 2262 remain -= len; 2263 } while (remain); 2264 2265 sg_miter->consumed = offset; 2266 status = mci_readl(host, MINTSTS); 2267 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2268 /* if the RXDR is ready read again */ 2269 } while ((status & SDMMC_INT_RXDR) || 2270 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2271 2272 if (!remain) { 2273 if (!sg_miter_next(sg_miter)) 2274 goto done; 2275 sg_miter->consumed = 0; 2276 } 2277 sg_miter_stop(sg_miter); 2278 return; 2279 2280 done: 2281 sg_miter_stop(sg_miter); 2282 host->sg = NULL; 2283 smp_wmb(); /* drain writebuffer */ 2284 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2285 } 2286 2287 static void dw_mci_write_data_pio(struct dw_mci *host) 2288 { 2289 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2290 void *buf; 2291 unsigned int offset; 2292 struct mmc_data *data = host->data; 2293 int shift = host->data_shift; 2294 u32 status; 2295 unsigned int len; 2296 unsigned int fifo_depth = host->fifo_depth; 2297 unsigned int remain, fcnt; 2298 2299 do { 2300 if (!sg_miter_next(sg_miter)) 2301 goto done; 2302 2303 host->sg = sg_miter->piter.sg; 2304 buf = sg_miter->addr; 2305 remain = sg_miter->length; 2306 offset = 0; 2307 2308 do { 2309 fcnt = ((fifo_depth - 2310 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2311 << shift) - host->part_buf_count; 2312 len = min(remain, fcnt); 2313 if (!len) 2314 break; 2315 host->push_data(host, (void *)(buf + offset), len); 2316 data->bytes_xfered += len; 2317 offset += len; 2318 remain -= len; 2319 } while (remain); 2320 2321 sg_miter->consumed = offset; 2322 status = mci_readl(host, MINTSTS); 2323 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2324 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 2325 2326 if (!remain) { 2327 if (!sg_miter_next(sg_miter)) 2328 goto done; 2329 sg_miter->consumed = 0; 2330 } 2331 sg_miter_stop(sg_miter); 2332 return; 2333 2334 done: 2335 sg_miter_stop(sg_miter); 2336 host->sg = NULL; 2337 smp_wmb(); /* drain writebuffer */ 2338 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2339 } 2340 2341 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2342 { 2343 if (!host->cmd_status) 2344 host->cmd_status = status; 2345 2346 smp_wmb(); /* drain writebuffer */ 2347 2348 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2349 tasklet_schedule(&host->tasklet); 2350 } 2351 2352 static void dw_mci_handle_cd(struct dw_mci *host) 2353 { 2354 int i; 2355 2356 for (i = 0; i < host->num_slots; i++) { 2357 struct dw_mci_slot *slot = host->slot[i]; 2358 2359 if (!slot) 2360 continue; 2361 2362 if (slot->mmc->ops->card_event) 2363 slot->mmc->ops->card_event(slot->mmc); 2364 mmc_detect_change(slot->mmc, 2365 msecs_to_jiffies(host->pdata->detect_delay_ms)); 2366 } 2367 } 2368 2369 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2370 { 2371 struct dw_mci *host = dev_id; 2372 u32 pending; 2373 int i; 2374 2375 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2376 2377 if (pending) { 2378 /* Check volt switch first, since it can look like an error */ 2379 if ((host->state == STATE_SENDING_CMD11) && 2380 (pending & SDMMC_INT_VOLT_SWITCH)) { 2381 unsigned long irqflags; 2382 2383 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 2384 pending &= ~SDMMC_INT_VOLT_SWITCH; 2385 2386 /* 2387 * Hold the lock; we know cmd11_timer can't be kicked 2388 * off after the lock is released, so safe to delete. 
2389 */ 2390 spin_lock_irqsave(&host->irq_lock, irqflags); 2391 dw_mci_cmd_interrupt(host, pending); 2392 spin_unlock_irqrestore(&host->irq_lock, irqflags); 2393 2394 del_timer(&host->cmd11_timer); 2395 } 2396 2397 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2398 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2399 host->cmd_status = pending; 2400 smp_wmb(); /* drain writebuffer */ 2401 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2402 } 2403 2404 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 2405 /* if there is an error report DATA_ERROR */ 2406 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2407 host->data_status = pending; 2408 smp_wmb(); /* drain writebuffer */ 2409 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2410 tasklet_schedule(&host->tasklet); 2411 } 2412 2413 if (pending & SDMMC_INT_DATA_OVER) { 2414 if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO) 2415 del_timer(&host->dto_timer); 2416 2417 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2418 if (!host->data_status) 2419 host->data_status = pending; 2420 smp_wmb(); /* drain writebuffer */ 2421 if (host->dir_status == DW_MCI_RECV_STATUS) { 2422 if (host->sg != NULL) 2423 dw_mci_read_data_pio(host, true); 2424 } 2425 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2426 tasklet_schedule(&host->tasklet); 2427 } 2428 2429 if (pending & SDMMC_INT_RXDR) { 2430 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2431 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 2432 dw_mci_read_data_pio(host, false); 2433 } 2434 2435 if (pending & SDMMC_INT_TXDR) { 2436 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2437 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2438 dw_mci_write_data_pio(host); 2439 } 2440 2441 if (pending & SDMMC_INT_CMD_DONE) { 2442 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2443 dw_mci_cmd_interrupt(host, pending); 2444 } 2445 2446 if (pending & SDMMC_INT_CD) { 2447 mci_writel(host, RINTSTS, SDMMC_INT_CD); 2448 dw_mci_handle_cd(host); 2449 } 2450 2451 /* Handle SDIO Interrupts */ 2452 for (i = 0; i < host->num_slots; i++) { 2453 struct dw_mci_slot *slot = host->slot[i]; 2454 2455 if (!slot) 2456 continue; 2457 2458 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 2459 mci_writel(host, RINTSTS, 2460 SDMMC_INT_SDIO(slot->sdio_id)); 2461 mmc_signal_sdio_irq(slot->mmc); 2462 } 2463 } 2464 2465 } 2466 2467 if (host->use_dma != TRANS_MODE_IDMAC) 2468 return IRQ_HANDLED; 2469 2470 /* Handle IDMA interrupts */ 2471 if (host->dma_64bit_address == 1) { 2472 pending = mci_readl(host, IDSTS64); 2473 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2474 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2475 SDMMC_IDMAC_INT_RI); 2476 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2477 host->dma_ops->complete((void *)host); 2478 } 2479 } else { 2480 pending = mci_readl(host, IDSTS); 2481 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2482 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2483 SDMMC_IDMAC_INT_RI); 2484 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2485 host->dma_ops->complete((void *)host); 2486 } 2487 } 2488 2489 return IRQ_HANDLED; 2490 } 2491 2492 #ifdef CONFIG_OF 2493 /* given a slot, find out the device node representing that slot */ 2494 static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot) 2495 { 2496 struct device *dev = slot->mmc->parent; 2497 struct device_node *np; 2498 const __be32 *addr; 2499 int len; 2500 2501 if (!dev || !dev->of_node) 2502 return NULL; 2503 2504 for_each_child_of_node(dev->of_node, np) { 2505 addr = of_get_property(np, "reg", &len); 2506 
if (!addr || (len < sizeof(int))) 2507 continue; 2508 if (be32_to_cpup(addr) == slot->id) 2509 return np; 2510 } 2511 return NULL; 2512 } 2513 2514 static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) 2515 { 2516 struct device_node *np = dw_mci_of_find_slot_node(slot); 2517 2518 if (!np) 2519 return; 2520 2521 if (of_property_read_bool(np, "disable-wp")) { 2522 slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; 2523 dev_warn(slot->mmc->parent, 2524 "Slot quirk 'disable-wp' is deprecated\n"); 2525 } 2526 } 2527 #else /* CONFIG_OF */ 2528 static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) 2529 { 2530 } 2531 #endif /* CONFIG_OF */ 2532 2533 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) 2534 { 2535 struct mmc_host *mmc; 2536 struct dw_mci_slot *slot; 2537 const struct dw_mci_drv_data *drv_data = host->drv_data; 2538 int ctrl_id, ret; 2539 u32 freq[2]; 2540 2541 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2542 if (!mmc) 2543 return -ENOMEM; 2544 2545 slot = mmc_priv(mmc); 2546 slot->id = id; 2547 slot->sdio_id = host->sdio_id0 + id; 2548 slot->mmc = mmc; 2549 slot->host = host; 2550 host->slot[id] = slot; 2551 2552 mmc->ops = &dw_mci_ops; 2553 if (of_property_read_u32_array(host->dev->of_node, 2554 "clock-freq-min-max", freq, 2)) { 2555 mmc->f_min = DW_MCI_FREQ_MIN; 2556 mmc->f_max = DW_MCI_FREQ_MAX; 2557 } else { 2558 mmc->f_min = freq[0]; 2559 mmc->f_max = freq[1]; 2560 } 2561 2562 /*if there are external regulators, get them*/ 2563 ret = mmc_regulator_get_supply(mmc); 2564 if (ret == -EPROBE_DEFER) 2565 goto err_host_allocated; 2566 2567 if (!mmc->ocr_avail) 2568 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2569 2570 if (host->pdata->caps) 2571 mmc->caps = host->pdata->caps; 2572 2573 if (host->pdata->pm_caps) 2574 mmc->pm_caps = host->pdata->pm_caps; 2575 2576 if (host->dev->of_node) { 2577 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2578 if (ctrl_id < 0) 2579 ctrl_id = 0; 2580 } else { 2581 ctrl_id = to_platform_device(host->dev)->id; 2582 } 2583 if (drv_data && drv_data->caps) 2584 mmc->caps |= drv_data->caps[ctrl_id]; 2585 2586 if (host->pdata->caps2) 2587 mmc->caps2 = host->pdata->caps2; 2588 2589 dw_mci_slot_of_parse(slot); 2590 2591 ret = mmc_of_parse(mmc); 2592 if (ret) 2593 goto err_host_allocated; 2594 2595 /* Useful defaults if platform data is unset. 
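	 * A rough summary of the limits applied below (derived from this
	 * code, not from the databook): with IDMAC each descriptor moves at
	 * most 4 KiB (DW_MCI_DESC_DATA_LENGTH), so max_seg_size is 0x1000
	 * and a request is bounded by ring_size segments; EDMAC and PIO use
	 * a fixed 64 segments instead, and BLKSIZ being a 16-bit register
	 * caps max_blk_size at 65535 in every mode.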
 */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check the transfer mode from HCON[17:16].
	 * This clears up the ambiguous description in the dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to the DesignWare DMA Interface, the Generic DMA Interface
	 * has a simpler request/acknowledge handshake mechanism, and both are
	 * treated as external DMA masters by dw_mmc.
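	 * In terms of the constants checked below: DMA_INTERFACE_IDMA selects
	 * TRANS_MODE_IDMAC, DMA_INTERFACE_DWDMA and DMA_INTERFACE_GDMA select
	 * TRANS_MODE_EDMAC, and anything else falls back to TRANS_MODE_PIO.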
2663 */ 2664 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 2665 if (host->use_dma == DMA_INTERFACE_IDMA) { 2666 host->use_dma = TRANS_MODE_IDMAC; 2667 } else if (host->use_dma == DMA_INTERFACE_DWDMA || 2668 host->use_dma == DMA_INTERFACE_GDMA) { 2669 host->use_dma = TRANS_MODE_EDMAC; 2670 } else { 2671 goto no_dma; 2672 } 2673 2674 /* Determine which DMA interface to use */ 2675 if (host->use_dma == TRANS_MODE_IDMAC) { 2676 /* 2677 * Check ADDR_CONFIG bit in HCON to find 2678 * IDMAC address bus width 2679 */ 2680 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 2681 2682 if (addr_config == 1) { 2683 /* host supports IDMAC in 64-bit address mode */ 2684 host->dma_64bit_address = 1; 2685 dev_info(host->dev, 2686 "IDMAC supports 64-bit address mode.\n"); 2687 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 2688 dma_set_coherent_mask(host->dev, 2689 DMA_BIT_MASK(64)); 2690 } else { 2691 /* host supports IDMAC in 32-bit address mode */ 2692 host->dma_64bit_address = 0; 2693 dev_info(host->dev, 2694 "IDMAC supports 32-bit address mode.\n"); 2695 } 2696 2697 /* Alloc memory for sg translation */ 2698 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2699 &host->sg_dma, GFP_KERNEL); 2700 if (!host->sg_cpu) { 2701 dev_err(host->dev, 2702 "%s: could not alloc DMA memory\n", 2703 __func__); 2704 goto no_dma; 2705 } 2706 2707 host->dma_ops = &dw_mci_idmac_ops; 2708 dev_info(host->dev, "Using internal DMA controller.\n"); 2709 } else { 2710 /* TRANS_MODE_EDMAC: check dma bindings again */ 2711 if ((of_property_count_strings(np, "dma-names") < 0) || 2712 (!of_find_property(np, "dmas", NULL))) { 2713 goto no_dma; 2714 } 2715 host->dma_ops = &dw_mci_edmac_ops; 2716 dev_info(host->dev, "Using external DMA controller.\n"); 2717 } 2718 2719 if (host->dma_ops->init && host->dma_ops->start && 2720 host->dma_ops->stop && host->dma_ops->cleanup) { 2721 if (host->dma_ops->init(host)) { 2722 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 2723 __func__); 2724 goto no_dma; 2725 } 2726 } else { 2727 dev_err(host->dev, "DMA initialization not found.\n"); 2728 goto no_dma; 2729 } 2730 2731 return; 2732 2733 no_dma: 2734 dev_info(host->dev, "Using PIO mode.\n"); 2735 host->use_dma = TRANS_MODE_PIO; 2736 } 2737 2738 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 2739 { 2740 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2741 u32 ctrl; 2742 2743 ctrl = mci_readl(host, CTRL); 2744 ctrl |= reset; 2745 mci_writel(host, CTRL, ctrl); 2746 2747 /* wait till resets clear */ 2748 do { 2749 ctrl = mci_readl(host, CTRL); 2750 if (!(ctrl & reset)) 2751 return true; 2752 } while (time_before(jiffies, timeout)); 2753 2754 dev_err(host->dev, 2755 "Timeout resetting block (ctrl reset %#x)\n", 2756 ctrl & reset); 2757 2758 return false; 2759 } 2760 2761 static bool dw_mci_reset(struct dw_mci *host) 2762 { 2763 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; 2764 bool ret = false; 2765 2766 /* 2767 * Reseting generates a block interrupt, hence setting 2768 * the scatter-gather pointer to NULL. 2769 */ 2770 if (host->sg) { 2771 sg_miter_stop(&host->sg_miter); 2772 host->sg = NULL; 2773 } 2774 2775 if (host->use_dma) 2776 flags |= SDMMC_CTRL_DMA_RESET; 2777 2778 if (dw_mci_ctrl_reset(host, flags)) { 2779 /* 2780 * In all cases we clear the RAWINTS register to clear any 2781 * interrupts. 
2782 */ 2783 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2784 2785 /* if using dma we wait for dma_req to clear */ 2786 if (host->use_dma) { 2787 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2788 u32 status; 2789 2790 do { 2791 status = mci_readl(host, STATUS); 2792 if (!(status & SDMMC_STATUS_DMA_REQ)) 2793 break; 2794 cpu_relax(); 2795 } while (time_before(jiffies, timeout)); 2796 2797 if (status & SDMMC_STATUS_DMA_REQ) { 2798 dev_err(host->dev, 2799 "%s: Timeout waiting for dma_req to clear during reset\n", 2800 __func__); 2801 goto ciu_out; 2802 } 2803 2804 /* when using DMA next we reset the fifo again */ 2805 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) 2806 goto ciu_out; 2807 } 2808 } else { 2809 /* if the controller reset bit did clear, then set clock regs */ 2810 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { 2811 dev_err(host->dev, 2812 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", 2813 __func__); 2814 goto ciu_out; 2815 } 2816 } 2817 2818 if (host->use_dma == TRANS_MODE_IDMAC) 2819 /* It is also recommended that we reset and reprogram idmac */ 2820 dw_mci_idmac_reset(host); 2821 2822 ret = true; 2823 2824 ciu_out: 2825 /* After a CTRL reset we need to have CIU set clock registers */ 2826 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0); 2827 2828 return ret; 2829 } 2830 2831 static void dw_mci_cmd11_timer(unsigned long arg) 2832 { 2833 struct dw_mci *host = (struct dw_mci *)arg; 2834 2835 if (host->state != STATE_SENDING_CMD11) { 2836 dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 2837 return; 2838 } 2839 2840 host->cmd_status = SDMMC_INT_RTO; 2841 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2842 tasklet_schedule(&host->tasklet); 2843 } 2844 2845 static void dw_mci_dto_timer(unsigned long arg) 2846 { 2847 struct dw_mci *host = (struct dw_mci *)arg; 2848 2849 switch (host->state) { 2850 case STATE_SENDING_DATA: 2851 case STATE_DATA_BUSY: 2852 /* 2853 * If DTO interrupt does NOT come in sending data state, 2854 * we should notify the driver to terminate current transfer 2855 * and report a data timeout to the core. 
2856 */ 2857 host->data_status = SDMMC_INT_DRTO; 2858 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2859 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2860 tasklet_schedule(&host->tasklet); 2861 break; 2862 default: 2863 break; 2864 } 2865 } 2866 2867 #ifdef CONFIG_OF 2868 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2869 { 2870 struct dw_mci_board *pdata; 2871 struct device *dev = host->dev; 2872 struct device_node *np = dev->of_node; 2873 const struct dw_mci_drv_data *drv_data = host->drv_data; 2874 int ret; 2875 u32 clock_frequency; 2876 2877 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 2878 if (!pdata) 2879 return ERR_PTR(-ENOMEM); 2880 2881 /* find out number of slots supported */ 2882 of_property_read_u32(np, "num-slots", &pdata->num_slots); 2883 2884 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) 2885 dev_info(dev, 2886 "fifo-depth property not found, using value of FIFOTH register as default\n"); 2887 2888 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); 2889 2890 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency)) 2891 pdata->bus_hz = clock_frequency; 2892 2893 if (drv_data && drv_data->parse_dt) { 2894 ret = drv_data->parse_dt(host); 2895 if (ret) 2896 return ERR_PTR(ret); 2897 } 2898 2899 if (of_find_property(np, "supports-highspeed", NULL)) { 2900 dev_info(dev, "supports-highspeed property is deprecated.\n"); 2901 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2902 } 2903 2904 return pdata; 2905 } 2906 2907 #else /* CONFIG_OF */ 2908 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2909 { 2910 return ERR_PTR(-EINVAL); 2911 } 2912 #endif /* CONFIG_OF */ 2913 2914 static void dw_mci_enable_cd(struct dw_mci *host) 2915 { 2916 unsigned long irqflags; 2917 u32 temp; 2918 int i; 2919 struct dw_mci_slot *slot; 2920 2921 /* 2922 * No need for CD if all slots have a non-error GPIO 2923 * as well as broken card detection is found. 
2924 */ 2925 for (i = 0; i < host->num_slots; i++) { 2926 slot = host->slot[i]; 2927 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) 2928 return; 2929 2930 if (mmc_gpio_get_cd(slot->mmc) < 0) 2931 break; 2932 } 2933 if (i == host->num_slots) 2934 return; 2935 2936 spin_lock_irqsave(&host->irq_lock, irqflags); 2937 temp = mci_readl(host, INTMASK); 2938 temp |= SDMMC_INT_CD; 2939 mci_writel(host, INTMASK, temp); 2940 spin_unlock_irqrestore(&host->irq_lock, irqflags); 2941 } 2942 2943 int dw_mci_probe(struct dw_mci *host) 2944 { 2945 const struct dw_mci_drv_data *drv_data = host->drv_data; 2946 int width, i, ret = 0; 2947 u32 fifo_size; 2948 int init_slots = 0; 2949 2950 if (!host->pdata) { 2951 host->pdata = dw_mci_parse_dt(host); 2952 if (IS_ERR(host->pdata)) { 2953 dev_err(host->dev, "platform data not available\n"); 2954 return -EINVAL; 2955 } 2956 } 2957 2958 host->biu_clk = devm_clk_get(host->dev, "biu"); 2959 if (IS_ERR(host->biu_clk)) { 2960 dev_dbg(host->dev, "biu clock not available\n"); 2961 } else { 2962 ret = clk_prepare_enable(host->biu_clk); 2963 if (ret) { 2964 dev_err(host->dev, "failed to enable biu clock\n"); 2965 return ret; 2966 } 2967 } 2968 2969 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 2970 if (IS_ERR(host->ciu_clk)) { 2971 dev_dbg(host->dev, "ciu clock not available\n"); 2972 host->bus_hz = host->pdata->bus_hz; 2973 } else { 2974 ret = clk_prepare_enable(host->ciu_clk); 2975 if (ret) { 2976 dev_err(host->dev, "failed to enable ciu clock\n"); 2977 goto err_clk_biu; 2978 } 2979 2980 if (host->pdata->bus_hz) { 2981 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 2982 if (ret) 2983 dev_warn(host->dev, 2984 "Unable to set bus rate to %uHz\n", 2985 host->pdata->bus_hz); 2986 } 2987 host->bus_hz = clk_get_rate(host->ciu_clk); 2988 } 2989 2990 if (!host->bus_hz) { 2991 dev_err(host->dev, 2992 "Platform data must supply bus speed\n"); 2993 ret = -ENODEV; 2994 goto err_clk_ciu; 2995 } 2996 2997 if (drv_data && drv_data->init) { 2998 ret = drv_data->init(host); 2999 if (ret) { 3000 dev_err(host->dev, 3001 "implementation specific init failed\n"); 3002 goto err_clk_ciu; 3003 } 3004 } 3005 3006 setup_timer(&host->cmd11_timer, 3007 dw_mci_cmd11_timer, (unsigned long)host); 3008 3009 host->quirks = host->pdata->quirks; 3010 3011 if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO) 3012 setup_timer(&host->dto_timer, 3013 dw_mci_dto_timer, (unsigned long)host); 3014 3015 spin_lock_init(&host->lock); 3016 spin_lock_init(&host->irq_lock); 3017 INIT_LIST_HEAD(&host->queue); 3018 3019 /* 3020 * Get the host data width - this assumes that HCON has been set with 3021 * the correct values. 
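	 * HDATA_WIDTH as read from HCON maps onto the PIO accessors chosen
	 * below: 0 -> 16-bit FIFO accesses (data_shift = 1), 2 -> 64-bit
	 * (data_shift = 3), and any other value is treated as the usual
	 * 32-bit case (data_shift = 2), with a warning if it is reserved.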
3022 */ 3023 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3024 if (!i) { 3025 host->push_data = dw_mci_push_data16; 3026 host->pull_data = dw_mci_pull_data16; 3027 width = 16; 3028 host->data_shift = 1; 3029 } else if (i == 2) { 3030 host->push_data = dw_mci_push_data64; 3031 host->pull_data = dw_mci_pull_data64; 3032 width = 64; 3033 host->data_shift = 3; 3034 } else { 3035 /* Check for a reserved value, and warn if it is */ 3036 WARN((i != 1), 3037 "HCON reports a reserved host data width!\n" 3038 "Defaulting to 32-bit access.\n"); 3039 host->push_data = dw_mci_push_data32; 3040 host->pull_data = dw_mci_pull_data32; 3041 width = 32; 3042 host->data_shift = 2; 3043 } 3044 3045 /* Reset all blocks */ 3046 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3047 ret = -ENODEV; 3048 goto err_clk_ciu; 3049 } 3050 3051 host->dma_ops = host->pdata->dma_ops; 3052 dw_mci_init_dma(host); 3053 3054 /* Clear the interrupts for the host controller */ 3055 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3056 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3057 3058 /* Put in max timeout */ 3059 mci_writel(host, TMOUT, 0xFFFFFFFF); 3060 3061 /* 3062 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 3063 * Tx Mark = fifo_size / 2 DMA Size = 8 3064 */ 3065 if (!host->pdata->fifo_depth) { 3066 /* 3067 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3068 * have been overwritten by the bootloader, just like we're 3069 * about to do, so if you know the value for your hardware, you 3070 * should put it in the platform data. 3071 */ 3072 fifo_size = mci_readl(host, FIFOTH); 3073 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3074 } else { 3075 fifo_size = host->pdata->fifo_depth; 3076 } 3077 host->fifo_depth = fifo_size; 3078 host->fifoth_val = 3079 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3080 mci_writel(host, FIFOTH, host->fifoth_val); 3081 3082 /* disable clock to CIU */ 3083 mci_writel(host, CLKENA, 0); 3084 mci_writel(host, CLKSRC, 0); 3085 3086 /* 3087 * In 2.40a spec, Data offset is changed. 3088 * Need to check the version-id and set data-offset for DATA register. 
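	 * Concretely: controllers older than 2.40a expose the FIFO at
	 * DATA_OFFSET, while 2.40a and newer use DATA_240A_OFFSET, as the
	 * version check below selects.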
3089 */ 3090 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 3091 dev_info(host->dev, "Version ID is %04x\n", host->verid); 3092 3093 if (host->verid < DW_MMC_240A) 3094 host->fifo_reg = host->regs + DATA_OFFSET; 3095 else 3096 host->fifo_reg = host->regs + DATA_240A_OFFSET; 3097 3098 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 3099 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3100 host->irq_flags, "dw-mci", host); 3101 if (ret) 3102 goto err_dmaunmap; 3103 3104 if (host->pdata->num_slots) 3105 host->num_slots = host->pdata->num_slots; 3106 else 3107 host->num_slots = 1; 3108 3109 if (host->num_slots < 1 || 3110 host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) { 3111 dev_err(host->dev, 3112 "Platform data must supply correct num_slots.\n"); 3113 ret = -ENODEV; 3114 goto err_clk_ciu; 3115 } 3116 3117 /* 3118 * Enable interrupts for command done, data over, data empty, 3119 * receive ready and error such as transmit, receive timeout, crc error 3120 */ 3121 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3122 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3123 DW_MCI_ERROR_FLAGS); 3124 /* Enable mci interrupt */ 3125 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3126 3127 dev_info(host->dev, 3128 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 3129 host->irq, width, fifo_size); 3130 3131 /* We need at least one slot to succeed */ 3132 for (i = 0; i < host->num_slots; i++) { 3133 ret = dw_mci_init_slot(host, i); 3134 if (ret) 3135 dev_dbg(host->dev, "slot %d init failed\n", i); 3136 else 3137 init_slots++; 3138 } 3139 3140 if (init_slots) { 3141 dev_info(host->dev, "%d slots initialized\n", init_slots); 3142 } else { 3143 dev_dbg(host->dev, 3144 "attempted to initialize %d slots, but failed on all\n", 3145 host->num_slots); 3146 goto err_dmaunmap; 3147 } 3148 3149 /* Now that slots are all setup, we can enable card detect */ 3150 dw_mci_enable_cd(host); 3151 3152 return 0; 3153 3154 err_dmaunmap: 3155 if (host->use_dma && host->dma_ops->exit) 3156 host->dma_ops->exit(host); 3157 3158 err_clk_ciu: 3159 if (!IS_ERR(host->ciu_clk)) 3160 clk_disable_unprepare(host->ciu_clk); 3161 3162 err_clk_biu: 3163 if (!IS_ERR(host->biu_clk)) 3164 clk_disable_unprepare(host->biu_clk); 3165 3166 return ret; 3167 } 3168 EXPORT_SYMBOL(dw_mci_probe); 3169 3170 void dw_mci_remove(struct dw_mci *host) 3171 { 3172 int i; 3173 3174 for (i = 0; i < host->num_slots; i++) { 3175 dev_dbg(host->dev, "remove slot %d\n", i); 3176 if (host->slot[i]) 3177 dw_mci_cleanup_slot(host->slot[i], i); 3178 } 3179 3180 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3181 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3182 3183 /* disable clock to CIU */ 3184 mci_writel(host, CLKENA, 0); 3185 mci_writel(host, CLKSRC, 0); 3186 3187 if (host->use_dma && host->dma_ops->exit) 3188 host->dma_ops->exit(host); 3189 3190 if (!IS_ERR(host->ciu_clk)) 3191 clk_disable_unprepare(host->ciu_clk); 3192 3193 if (!IS_ERR(host->biu_clk)) 3194 clk_disable_unprepare(host->biu_clk); 3195 } 3196 EXPORT_SYMBOL(dw_mci_remove); 3197 3198 3199 3200 #ifdef CONFIG_PM_SLEEP 3201 /* 3202 * TODO: we should probably disable the clock to the card in the suspend path. 
3203 */ 3204 int dw_mci_suspend(struct dw_mci *host) 3205 { 3206 if (host->use_dma && host->dma_ops->exit) 3207 host->dma_ops->exit(host); 3208 3209 return 0; 3210 } 3211 EXPORT_SYMBOL(dw_mci_suspend); 3212 3213 int dw_mci_resume(struct dw_mci *host) 3214 { 3215 int i, ret; 3216 3217 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3218 ret = -ENODEV; 3219 return ret; 3220 } 3221 3222 if (host->use_dma && host->dma_ops->init) 3223 host->dma_ops->init(host); 3224 3225 /* 3226 * Restore the initial value at FIFOTH register 3227 * And Invalidate the prev_blksz with zero 3228 */ 3229 mci_writel(host, FIFOTH, host->fifoth_val); 3230 host->prev_blksz = 0; 3231 3232 /* Put in max timeout */ 3233 mci_writel(host, TMOUT, 0xFFFFFFFF); 3234 3235 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3236 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3237 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3238 DW_MCI_ERROR_FLAGS); 3239 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3240 3241 for (i = 0; i < host->num_slots; i++) { 3242 struct dw_mci_slot *slot = host->slot[i]; 3243 3244 if (!slot) 3245 continue; 3246 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 3247 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 3248 dw_mci_setup_bus(slot, true); 3249 } 3250 } 3251 3252 /* Now that slots are all setup, we can enable card detect */ 3253 dw_mci_enable_cd(host); 3254 3255 return 0; 3256 } 3257 EXPORT_SYMBOL(dw_mci_resume); 3258 #endif /* CONFIG_PM_SLEEP */ 3259 3260 static int __init dw_mci_init(void) 3261 { 3262 pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 3263 return 0; 3264 } 3265 3266 static void __exit dw_mci_exit(void) 3267 { 3268 } 3269 3270 module_init(dw_mci_init); 3271 module_exit(dw_mci_exit); 3272 3273 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3274 MODULE_AUTHOR("NXP Semiconductor VietNam"); 3275 MODULE_AUTHOR("Imagination Technologies Ltd"); 3276 MODULE_LICENSE("GPL v2"); 3277