/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

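/*
 * Translate an mmc_command into an SDMMC_CMD register value: abort-class
 * commands get SDMMC_CMD_STOP, other data commands wait for completion of
 * any previous data, and the response, CRC and data-direction bits are
 * derived from the command flags.
 */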
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock.  That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

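/*
 * Issue the stop command for the current data transfer: the request's own
 * stop command when one was supplied, otherwise the stop/abort command
 * pre-built by dw_mci_prep_stop_abort().
 */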
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

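/*
 * Build the IDMAC descriptor ring from the DMA-mapped scatterlist, using
 * the 64-bit or 32-bit descriptor layout to match the controller
 * configuration.
 */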
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u64 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) *
				sizeof(struct idmac_desc_64addr);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++, desc++) {
			unsigned int length = sg_dma_len(&data->sg[i]);
			u32 mem_addr = sg_dma_address(&data->sg[i]);

			/*
			 * Set the OWN bit and disable interrupts for this
			 * descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;
			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, length);

			/* Physical address to DMA to/from */
			desc->des2 = mem_addr;
		}

		/* Set first descriptor */
		desc = host->sg_cpu;
		desc->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc->des0 |= IDMAC_DES0_LD;
	}

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

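/*
 * Link one page worth of pre-allocated descriptors into a forward-linked
 * ring, mark the tail as end-of-ring, then program the descriptor base
 * address and unmask only the transmit/receive-complete IDMAC interrupts.
 */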
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
			p->des3 = host->sg_dma + (sizeof(struct idmac_desc) *
								(i + 1));

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = host->sg_dma;
		p->des0 = IDMAC_DES0_ER;
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

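/*
 * Decide whether a transfer can use DMA and, if so, map the scatterlist.
 * Short or non-word-aligned transfers fall back to PIO; a non-zero
 * host_cookie records that the mapping was already done in pre_req().
 */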
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}

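/*
 * Program the card read threshold (CDTHRCTL): only start a read transfer
 * when the FIFO is guaranteed to have room for a full block, which matters
 * at HS200/SDR104 rates.  The register only exists from IP version 240A.
 */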
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset
	 * is in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A)
		return;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

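/*
 * Send a register-only command (such as a clock update) to the CIU and
 * busy-wait up to 500ms for the controller to clear the start bit.
 */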
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->sdio_id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* remember the requested clock with the divider reflected */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

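/*
 * Start a request on the given slot: program the timeout, byte and block
 * counts, submit any data, issue the command, and pre-compute stop_cmdr
 * so a stop/abort can be sent later without further setup.
 */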
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if we failed to turn on vmmc */
				return;
			}
		}
		if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(slot->host->dev,
					"failed to enable vqmmc regulator\n");
			else
				slot->host->vqmmc_enabled = true;
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			slot->host->vqmmc_enabled = false;
		}

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int min_uv, max_uv;
	int ret;

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d: %d - %d\n",
				ret, min_uv, max_uv);
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
	    (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
		read_only = 0;
	else if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

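/*
 * Report card presence: a broken card-detect quirk forces "present",
 * otherwise prefer the card-detect GPIO and fall back to the CDETECT
 * register.  The slot's PRESENT flag is updated under the host lock.
 */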
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled.  This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->sdio_id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->sdio_id)));
	}
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

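/*
 * Copy the response out of the RESP registers (for a 136-bit response,
 * RESP3 holds the most significant word) and translate the latched
 * interrupt status into an errno for the command.
 */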
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

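/*
 * The request state machine.  Runs in tasklet context and consumes the
 * EVENT_* bits that the interrupt handler sets in pending_events, walking
 * each request through the CMD -> DATA -> BUSY -> STOP states.
 */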
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued above.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

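/*
 * The FIFO only takes full data-width words, so the push/pull helpers
 * below stage any trailing partial word in part_buf; it is flushed once
 * filled, or once the expected transfer length has been reached.
 */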
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

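/*
 * Drain the FIFO into the scatterlist in PIO mode using sg_miter,
 * looping while the controller reports more data ready (RXDR) or, on
 * data-over (dto), until the FIFO is empty.
 */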
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_handle_cd(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

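/*
 * Top-level interrupt handler: acknowledges bits in RINTSTS, latches the
 * raw status for the tasklet, services PIO data-request signals directly,
 * and schedules the tasklet for command/data completion handling.
 */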

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_handle_cd(struct dw_mci *host)
{
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;

		if (slot->mmc->ops->card_event)
			slot->mmc->ops->card_event(slot->mmc);
		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			/* guard against slots that failed to initialize */
			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			host->dma_ops->complete(host);
		}
	}
#endif

	return IRQ_HANDLED;
}
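
/*
 * A note on the DW_MCI_QUIRK_IDMAC_DTO check in the handler above: bits
 * [29:17] of the STATUS register hold the FIFO count, so "no interrupt
 * pending but the FIFO is not empty" is used as a stand-in for the
 * data-over interrupt that controller versions 2.10a and below may fail
 * to raise when the internal DMAC is in use.
 */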

#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}

static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
			dev_warn(dev, "Slot quirk %s is deprecated\n",
					of_slot_quirks[idx].quirk);
			quirks |= of_slot_quirks[idx].id;
		}

	return quirks;
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
#endif /* CONFIG_OF */
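
/*
 * For illustration, a hypothetical devicetree fragment that the slot
 * lookup and quirk parsing above would match (node and property names
 * follow the dw-mshc binding; the unit address and values are examples
 * only):
 *
 *	mmc@12200000 {
 *		...
 *		slot@0 {
 *			reg = <0>;
 *			disable-wp;
 *		};
 *	};
 *
 * dw_mci_of_find_slot_node(dev, 0) returns the slot@0 node, and
 * dw_mci_of_get_slot_quirks() then maps the (deprecated) "disable-wp"
 * property to DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT.
 */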

static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;

	/* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
	addr_config = (mci_readl(host, HCON) >> 27) & 0x01;

	if (addr_config == 1) {
		/* host supports IDMAC in 64-bit address mode */
		host->dma_64bit_address = 1;
		dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
		if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
			dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
	} else {
		/* host supports IDMAC in 32-bit address mode */
		host->dma_64bit_address = 0;
		dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
	}

	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev,
				"%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
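
/*
 * dw_mci_init_dma() above keys off bit 27 (ADDR_CONFIG) of the HCON
 * register: for example, a controller synthesized with 64-bit IDMAC
 * addressing reports HCON[27] = 1, so the driver widens the DMA mask to
 * DMA_BIT_MASK(64) before allocating the descriptor ring; otherwise it
 * stays with the default 32-bit mask.
 */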

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, so make sure the
	 * scatter-gather pointer is set to NULL first.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;

			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
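
/*
 * To summarize dw_mci_reset() above: (1) stop any in-flight scatter-gather
 * walk, (2) reset the controller and FIFO (plus DMA when in use), (3) clear
 * RINTSTS, (4) wait up to 500 ms for dma_req to drop and reset the FIFO once
 * more when DMA is active, (5) optionally reset the IDMAC, and finally
 * (6) issue SDMMC_CMD_UPD_CLK so the CIU reloads its clock registers, which
 * is required after any CTRL reset.
 */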

#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk	= "disable-wp",
		.id	= DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};

static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev,
			 "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
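
/*
 * A hypothetical devicetree fragment exercising the properties parsed
 * above (the unit address and values are illustrative only):
 *
 *	mmc@12200000 {
 *		compatible = "snps,dw-mshc";
 *		num-slots = <1>;
 *		broken-cd;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <200000000>;
 *	};
 *
 * This would yield pdata->num_slots = 1, the BROKEN_CARD_DETECTION quirk,
 * fifo_depth = 128 entries, detect_delay_ms = 200 and bus_hz = 200 MHz.
 */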

int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (host->pdata->num_slots > 1) {
		dev_err(host->dev, "num_slots > 1 is not supported\n");
		return -ENODEV;
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings:  RX_WMark = fifo_size / 2 - 1,
	 *                           TX_WMark = fifo_size / 2,
	 *                           DMA Size = 8 transfers.
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
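
	/*
	 * Worked example of the FIFOTH value set above (numbers are
	 * illustrative): with a 32-entry FIFO, SDMMC_SET_FIFOTH packs
	 * MSIZE = 0x2 (8-transfer bursts), RX_WMark = 32 / 2 - 1 = 15 and
	 * TX_WMark = 32 / 2 = 16, so a transfer request is raised once the
	 * receive FIFO fills past 15 entries or the transmit FIFO drains
	 * down to 16.
	 */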
2733 */ 2734 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 2735 dev_info(host->dev, "Version ID is %04x\n", host->verid); 2736 2737 if (host->verid < DW_MMC_240A) 2738 host->data_offset = DATA_OFFSET; 2739 else 2740 host->data_offset = DATA_240A_OFFSET; 2741 2742 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 2743 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 2744 host->irq_flags, "dw-mci", host); 2745 if (ret) 2746 goto err_dmaunmap; 2747 2748 if (host->pdata->num_slots) 2749 host->num_slots = host->pdata->num_slots; 2750 else 2751 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; 2752 2753 /* 2754 * Enable interrupts for command done, data over, data empty, card det, 2755 * receive ready and error such as transmit, receive timeout, crc error 2756 */ 2757 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2758 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2759 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2760 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2761 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2762 2763 dev_info(host->dev, "DW MMC controller at irq %d, " 2764 "%d bit host data width, " 2765 "%u deep fifo\n", 2766 host->irq, width, fifo_size); 2767 2768 /* We need at least one slot to succeed */ 2769 for (i = 0; i < host->num_slots; i++) { 2770 ret = dw_mci_init_slot(host, i); 2771 if (ret) 2772 dev_dbg(host->dev, "slot %d init failed\n", i); 2773 else 2774 init_slots++; 2775 } 2776 2777 if (init_slots) { 2778 dev_info(host->dev, "%d slots initialized\n", init_slots); 2779 } else { 2780 dev_dbg(host->dev, "attempted to initialize %d slots, " 2781 "but failed on all\n", host->num_slots); 2782 goto err_dmaunmap; 2783 } 2784 2785 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2786 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); 2787 2788 return 0; 2789 2790 err_dmaunmap: 2791 if (host->use_dma && host->dma_ops->exit) 2792 host->dma_ops->exit(host); 2793 2794 err_clk_ciu: 2795 if (!IS_ERR(host->ciu_clk)) 2796 clk_disable_unprepare(host->ciu_clk); 2797 2798 err_clk_biu: 2799 if (!IS_ERR(host->biu_clk)) 2800 clk_disable_unprepare(host->biu_clk); 2801 2802 return ret; 2803 } 2804 EXPORT_SYMBOL(dw_mci_probe); 2805 2806 void dw_mci_remove(struct dw_mci *host) 2807 { 2808 int i; 2809 2810 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2811 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2812 2813 for (i = 0; i < host->num_slots; i++) { 2814 dev_dbg(host->dev, "remove slot %d\n", i); 2815 if (host->slot[i]) 2816 dw_mci_cleanup_slot(host->slot[i], i); 2817 } 2818 2819 /* disable clock to CIU */ 2820 mci_writel(host, CLKENA, 0); 2821 mci_writel(host, CLKSRC, 0); 2822 2823 if (host->use_dma && host->dma_ops->exit) 2824 host->dma_ops->exit(host); 2825 2826 if (!IS_ERR(host->ciu_clk)) 2827 clk_disable_unprepare(host->ciu_clk); 2828 2829 if (!IS_ERR(host->biu_clk)) 2830 clk_disable_unprepare(host->biu_clk); 2831 } 2832 EXPORT_SYMBOL(dw_mci_remove); 2833 2834 2835 2836 #ifdef CONFIG_PM_SLEEP 2837 /* 2838 * TODO: we should probably disable the clock to the card in the suspend path. 
2839 */ 2840 int dw_mci_suspend(struct dw_mci *host) 2841 { 2842 return 0; 2843 } 2844 EXPORT_SYMBOL(dw_mci_suspend); 2845 2846 int dw_mci_resume(struct dw_mci *host) 2847 { 2848 int i, ret; 2849 2850 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 2851 ret = -ENODEV; 2852 return ret; 2853 } 2854 2855 if (host->use_dma && host->dma_ops->init) 2856 host->dma_ops->init(host); 2857 2858 /* 2859 * Restore the initial value at FIFOTH register 2860 * And Invalidate the prev_blksz with zero 2861 */ 2862 mci_writel(host, FIFOTH, host->fifoth_val); 2863 host->prev_blksz = 0; 2864 2865 /* Put in max timeout */ 2866 mci_writel(host, TMOUT, 0xFFFFFFFF); 2867 2868 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2869 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2870 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2871 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2872 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 2873 2874 for (i = 0; i < host->num_slots; i++) { 2875 struct dw_mci_slot *slot = host->slot[i]; 2876 if (!slot) 2877 continue; 2878 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 2879 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 2880 dw_mci_setup_bus(slot, true); 2881 } 2882 } 2883 return 0; 2884 } 2885 EXPORT_SYMBOL(dw_mci_resume); 2886 #endif /* CONFIG_PM_SLEEP */ 2887 2888 static int __init dw_mci_init(void) 2889 { 2890 pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 2891 return 0; 2892 } 2893 2894 static void __exit dw_mci_exit(void) 2895 { 2896 } 2897 2898 module_init(dw_mci_init); 2899 module_exit(dw_mci_exit); 2900 2901 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 2902 MODULE_AUTHOR("NXP Semiconductor VietNam"); 2903 MODULE_AUTHOR("Imagination Technologies Ltd"); 2904 MODULE_LICENSE("GPL v2"); 2905