1 /* 2 * Synopsys DesignWare Multimedia Card Interface driver 3 * (Based on NXP driver for lpc 31xx) 4 * 5 * Copyright (C) 2009 NXP Semiconductors 6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 */ 13 14 #include <linux/blkdev.h> 15 #include <linux/clk.h> 16 #include <linux/debugfs.h> 17 #include <linux/device.h> 18 #include <linux/dma-mapping.h> 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/interrupt.h> 22 #include <linux/ioport.h> 23 #include <linux/module.h> 24 #include <linux/platform_device.h> 25 #include <linux/seq_file.h> 26 #include <linux/slab.h> 27 #include <linux/stat.h> 28 #include <linux/delay.h> 29 #include <linux/irq.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/mmc.h> 32 #include <linux/mmc/dw_mmc.h> 33 #include <linux/bitops.h> 34 #include <linux/regulator/consumer.h> 35 #include <linux/workqueue.h> 36 #include <linux/of.h> 37 #include <linux/of_gpio.h> 38 39 #include "dw_mmc.h" 40 41 /* Common flag combinations */ 42 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 43 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 44 SDMMC_INT_EBE) 45 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 46 SDMMC_INT_RESP_ERR) 47 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 48 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) 49 #define DW_MCI_SEND_STATUS 1 50 #define DW_MCI_RECV_STATUS 2 51 #define DW_MCI_DMA_THRESHOLD 16 52 53 #ifdef CONFIG_MMC_DW_IDMAC 54 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 55 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 56 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 57 SDMMC_IDMAC_INT_TI) 58 59 struct idmac_desc { 60 u32 des0; /* Control Descriptor */ 61 #define IDMAC_DES0_DIC BIT(1) 62 #define IDMAC_DES0_LD BIT(2) 63 #define IDMAC_DES0_FD BIT(3) 64 #define IDMAC_DES0_CH BIT(4) 65 #define IDMAC_DES0_ER BIT(5) 66 #define IDMAC_DES0_CES BIT(30) 67 #define IDMAC_DES0_OWN BIT(31) 68 69 u32 des1; /* Buffer sizes */ 70 #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 71 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) 72 73 u32 des2; /* buffer 1 physical address */ 74 75 u32 des3; /* buffer 2 physical address */ 76 }; 77 #endif /* CONFIG_MMC_DW_IDMAC */ 78 79 /** 80 * struct dw_mci_slot - MMC slot state 81 * @mmc: The mmc_host representing this slot. 82 * @host: The MMC controller this slot is using. 83 * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) 84 * @wp_gpio: If gpio_is_valid() we'll use this to read write protect. 85 * @ctype: Card type for this slot. 86 * @mrq: mmc_request currently being processed or waiting to be 87 * processed, or NULL when the slot is idle. 88 * @queue_node: List node for placing this node in the @queue list of 89 * &struct dw_mci. 90 * @clock: Clock rate configured by set_ios(). Protected by host->lock. 91 * @flags: Random state bits associated with the slot. 92 * @id: Number of this slot. 93 * @last_detect_state: Most recently observed card detect state. 94 */ 95 struct dw_mci_slot { 96 struct mmc_host *mmc; 97 struct dw_mci *host; 98 99 int quirks; 100 int wp_gpio; 101 102 u32 ctype; 103 104 struct mmc_request *mrq; 105 struct list_head queue_node; 106 107 unsigned int clock; 108 unsigned long flags; 109 #define DW_MMC_CARD_PRESENT 0 110 #define DW_MMC_CARD_NEED_INIT 1 111 int id; 112 int last_detect_state; 113 }; 114 115 #if defined(CONFIG_DEBUG_FS) 116 static int dw_mci_req_show(struct seq_file *s, void *v) 117 { 118 struct dw_mci_slot *slot = s->private; 119 struct mmc_request *mrq; 120 struct mmc_command *cmd; 121 struct mmc_command *stop; 122 struct mmc_data *data; 123 124 /* Make sure we get a consistent snapshot */ 125 spin_lock_bh(&slot->host->lock); 126 mrq = slot->mrq; 127 128 if (mrq) { 129 cmd = mrq->cmd; 130 data = mrq->data; 131 stop = mrq->stop; 132 133 if (cmd) 134 seq_printf(s, 135 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 136 cmd->opcode, cmd->arg, cmd->flags, 137 cmd->resp[0], cmd->resp[1], cmd->resp[2], 138 cmd->resp[2], cmd->error); 139 if (data) 140 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 141 data->bytes_xfered, data->blocks, 142 data->blksz, data->flags, data->error); 143 if (stop) 144 seq_printf(s, 145 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 146 stop->opcode, stop->arg, stop->flags, 147 stop->resp[0], stop->resp[1], stop->resp[2], 148 stop->resp[2], stop->error); 149 } 150 151 spin_unlock_bh(&slot->host->lock); 152 153 return 0; 154 } 155 156 static int dw_mci_req_open(struct inode *inode, struct file *file) 157 { 158 return single_open(file, dw_mci_req_show, inode->i_private); 159 } 160 161 static const struct file_operations dw_mci_req_fops = { 162 .owner = THIS_MODULE, 163 .open = dw_mci_req_open, 164 .read = seq_read, 165 .llseek = seq_lseek, 166 .release = single_release, 167 }; 168 169 static int dw_mci_regs_show(struct seq_file *s, void *v) 170 { 171 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS); 172 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS); 173 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD); 174 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL); 175 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK); 176 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA); 177 178 return 0; 179 } 180 181 static int dw_mci_regs_open(struct inode *inode, struct file *file) 182 { 183 return single_open(file, dw_mci_regs_show, inode->i_private); 184 } 185 186 static const struct file_operations dw_mci_regs_fops = { 187 .owner = THIS_MODULE, 188 .open = dw_mci_regs_open, 189 .read = seq_read, 190 .llseek = seq_lseek, 191 .release = single_release, 192 }; 193 194 static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 195 { 196 struct mmc_host *mmc = slot->mmc; 197 struct dw_mci *host = slot->host; 198 struct dentry *root; 199 struct dentry *node; 200 201 root = mmc->debugfs_root; 202 if (!root) 203 return; 204 205 node = debugfs_create_file("regs", S_IRUSR, root, host, 206 &dw_mci_regs_fops); 207 if (!node) 208 goto err; 209 210 node = debugfs_create_file("req", S_IRUSR, root, slot, 211 &dw_mci_req_fops); 212 if (!node) 213 goto err; 214 215 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 216 if (!node) 217 goto err; 218 219 node = debugfs_create_x32("pending_events", S_IRUSR, root, 220 (u32 *)&host->pending_events); 221 if (!node) 222 goto err; 223 224 node = debugfs_create_x32("completed_events", S_IRUSR, root, 225 (u32 *)&host->completed_events); 226 if (!node) 227 goto err; 228 229 return; 230 231 err: 232 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 233 } 234 #endif /* defined(CONFIG_DEBUG_FS) */ 235 236 static void dw_mci_set_timeout(struct dw_mci *host) 237 { 238 /* timeout (maximum) */ 239 mci_writel(host, TMOUT, 0xffffffff); 240 } 241 242 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 243 { 244 struct mmc_data *data; 245 struct dw_mci_slot *slot = mmc_priv(mmc); 246 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 247 u32 cmdr; 248 cmd->error = -EINPROGRESS; 249 250 cmdr = cmd->opcode; 251 252 if (cmdr == MMC_STOP_TRANSMISSION) 253 cmdr |= SDMMC_CMD_STOP; 254 else 255 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 256 257 if (cmd->flags & MMC_RSP_PRESENT) { 258 /* We expect a response, so set this bit */ 259 cmdr |= SDMMC_CMD_RESP_EXP; 260 if (cmd->flags & MMC_RSP_136) 261 cmdr |= SDMMC_CMD_RESP_LONG; 262 } 263 264 if (cmd->flags & MMC_RSP_CRC) 265 cmdr |= SDMMC_CMD_RESP_CRC; 266 267 data = cmd->data; 268 if (data) { 269 cmdr |= SDMMC_CMD_DAT_EXP; 270 if (data->flags & MMC_DATA_STREAM) 271 cmdr |= SDMMC_CMD_STRM_MODE; 272 if (data->flags & MMC_DATA_WRITE) 273 cmdr |= SDMMC_CMD_DAT_WR; 274 } 275 276 if (drv_data && drv_data->prepare_command) 277 drv_data->prepare_command(slot->host, &cmdr); 278 279 return cmdr; 280 } 281 282 static void dw_mci_start_command(struct dw_mci *host, 283 struct mmc_command *cmd, u32 cmd_flags) 284 { 285 host->cmd = cmd; 286 dev_vdbg(host->dev, 287 "start command: ARGR=0x%08x CMDR=0x%08x\n", 288 cmd->arg, cmd_flags); 289 290 mci_writel(host, CMDARG, cmd->arg); 291 wmb(); 292 293 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 294 } 295 296 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) 297 { 298 dw_mci_start_command(host, data->stop, host->stop_cmdr); 299 } 300 301 /* DMA interface functions */ 302 static void dw_mci_stop_dma(struct dw_mci *host) 303 { 304 if (host->using_dma) { 305 host->dma_ops->stop(host); 306 host->dma_ops->cleanup(host); 307 } else { 308 /* Data transfer was stopped by the interrupt handler */ 309 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 310 } 311 } 312 313 static int dw_mci_get_dma_dir(struct mmc_data *data) 314 { 315 if (data->flags & MMC_DATA_WRITE) 316 return DMA_TO_DEVICE; 317 else 318 return DMA_FROM_DEVICE; 319 } 320 321 #ifdef CONFIG_MMC_DW_IDMAC 322 static void dw_mci_dma_cleanup(struct dw_mci *host) 323 { 324 struct mmc_data *data = host->data; 325 326 if (data) 327 if (!data->host_cookie) 328 dma_unmap_sg(host->dev, 329 data->sg, 330 data->sg_len, 331 dw_mci_get_dma_dir(data)); 332 } 333 334 static void dw_mci_idmac_stop_dma(struct dw_mci *host) 335 { 336 u32 temp; 337 338 /* Disable and reset the IDMAC interface */ 339 temp = mci_readl(host, CTRL); 340 temp &= ~SDMMC_CTRL_USE_IDMAC; 341 temp |= SDMMC_CTRL_DMA_RESET; 342 mci_writel(host, CTRL, temp); 343 344 /* Stop the IDMAC running */ 345 temp = mci_readl(host, BMOD); 346 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 347 mci_writel(host, BMOD, temp); 348 } 349 350 static void dw_mci_idmac_complete_dma(struct dw_mci *host) 351 { 352 struct mmc_data *data = host->data; 353 354 dev_vdbg(host->dev, "DMA complete\n"); 355 356 host->dma_ops->cleanup(host); 357 358 /* 359 * If the card was removed, data will be NULL. No point in trying to 360 * send the stop command or waiting for NBUSY in this case. 361 */ 362 if (data) { 363 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 364 tasklet_schedule(&host->tasklet); 365 } 366 } 367 368 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, 369 unsigned int sg_len) 370 { 371 int i; 372 struct idmac_desc *desc = host->sg_cpu; 373 374 for (i = 0; i < sg_len; i++, desc++) { 375 unsigned int length = sg_dma_len(&data->sg[i]); 376 u32 mem_addr = sg_dma_address(&data->sg[i]); 377 378 /* Set the OWN bit and disable interrupts for this descriptor */ 379 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; 380 381 /* Buffer length */ 382 IDMAC_SET_BUFFER1_SIZE(desc, length); 383 384 /* Physical address to DMA to/from */ 385 desc->des2 = mem_addr; 386 } 387 388 /* Set first descriptor */ 389 desc = host->sg_cpu; 390 desc->des0 |= IDMAC_DES0_FD; 391 392 /* Set last descriptor */ 393 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); 394 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 395 desc->des0 |= IDMAC_DES0_LD; 396 397 wmb(); 398 } 399 400 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 401 { 402 u32 temp; 403 404 dw_mci_translate_sglist(host, host->data, sg_len); 405 406 /* Select IDMAC interface */ 407 temp = mci_readl(host, CTRL); 408 temp |= SDMMC_CTRL_USE_IDMAC; 409 mci_writel(host, CTRL, temp); 410 411 wmb(); 412 413 /* Enable the IDMAC */ 414 temp = mci_readl(host, BMOD); 415 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 416 mci_writel(host, BMOD, temp); 417 418 /* Start it running */ 419 mci_writel(host, PLDMND, 1); 420 } 421 422 static int dw_mci_idmac_init(struct dw_mci *host) 423 { 424 struct idmac_desc *p; 425 int i; 426 427 /* Number of descriptors in the ring buffer */ 428 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 429 430 /* Forward link the descriptor list */ 431 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 432 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); 433 434 /* Set the last descriptor as the end-of-ring descriptor */ 435 p->des3 = host->sg_dma; 436 p->des0 = IDMAC_DES0_ER; 437 438 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); 439 440 /* Mask out interrupts - get Tx & Rx complete only */ 441 mci_writel(host, IDSTS, IDMAC_INT_CLR); 442 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 443 SDMMC_IDMAC_INT_TI); 444 445 /* Set the descriptor base address */ 446 mci_writel(host, DBADDR, host->sg_dma); 447 return 0; 448 } 449 450 static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 451 .init = dw_mci_idmac_init, 452 .start = dw_mci_idmac_start_dma, 453 .stop = dw_mci_idmac_stop_dma, 454 .complete = dw_mci_idmac_complete_dma, 455 .cleanup = dw_mci_dma_cleanup, 456 }; 457 #endif /* CONFIG_MMC_DW_IDMAC */ 458 459 static int dw_mci_pre_dma_transfer(struct dw_mci *host, 460 struct mmc_data *data, 461 bool next) 462 { 463 struct scatterlist *sg; 464 unsigned int i, sg_len; 465 466 if (!next && data->host_cookie) 467 return data->host_cookie; 468 469 /* 470 * We don't do DMA on "complex" transfers, i.e. with 471 * non-word-aligned buffers or lengths. Also, we don't bother 472 * with all the DMA setup overhead for short transfers. 473 */ 474 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 475 return -EINVAL; 476 477 if (data->blksz & 3) 478 return -EINVAL; 479 480 for_each_sg(data->sg, sg, data->sg_len, i) { 481 if (sg->offset & 3 || sg->length & 3) 482 return -EINVAL; 483 } 484 485 sg_len = dma_map_sg(host->dev, 486 data->sg, 487 data->sg_len, 488 dw_mci_get_dma_dir(data)); 489 if (sg_len == 0) 490 return -EINVAL; 491 492 if (next) 493 data->host_cookie = sg_len; 494 495 return sg_len; 496 } 497 498 static void dw_mci_pre_req(struct mmc_host *mmc, 499 struct mmc_request *mrq, 500 bool is_first_req) 501 { 502 struct dw_mci_slot *slot = mmc_priv(mmc); 503 struct mmc_data *data = mrq->data; 504 505 if (!slot->host->use_dma || !data) 506 return; 507 508 if (data->host_cookie) { 509 data->host_cookie = 0; 510 return; 511 } 512 513 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) 514 data->host_cookie = 0; 515 } 516 517 static void dw_mci_post_req(struct mmc_host *mmc, 518 struct mmc_request *mrq, 519 int err) 520 { 521 struct dw_mci_slot *slot = mmc_priv(mmc); 522 struct mmc_data *data = mrq->data; 523 524 if (!slot->host->use_dma || !data) 525 return; 526 527 if (data->host_cookie) 528 dma_unmap_sg(slot->host->dev, 529 data->sg, 530 data->sg_len, 531 dw_mci_get_dma_dir(data)); 532 data->host_cookie = 0; 533 } 534 535 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 536 { 537 int sg_len; 538 u32 temp; 539 540 host->using_dma = 0; 541 542 /* If we don't have a channel, we can't do DMA */ 543 if (!host->use_dma) 544 return -ENODEV; 545 546 sg_len = dw_mci_pre_dma_transfer(host, data, 0); 547 if (sg_len < 0) { 548 host->dma_ops->stop(host); 549 return sg_len; 550 } 551 552 host->using_dma = 1; 553 554 dev_vdbg(host->dev, 555 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 556 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 557 sg_len); 558 559 /* Enable the DMA interface */ 560 temp = mci_readl(host, CTRL); 561 temp |= SDMMC_CTRL_DMA_ENABLE; 562 mci_writel(host, CTRL, temp); 563 564 /* Disable RX/TX IRQs, let DMA handle it */ 565 temp = mci_readl(host, INTMASK); 566 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 567 mci_writel(host, INTMASK, temp); 568 569 host->dma_ops->start(host, sg_len); 570 571 return 0; 572 } 573 574 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 575 { 576 u32 temp; 577 578 data->error = -EINPROGRESS; 579 580 WARN_ON(host->data); 581 host->sg = NULL; 582 host->data = data; 583 584 if (data->flags & MMC_DATA_READ) 585 host->dir_status = DW_MCI_RECV_STATUS; 586 else 587 host->dir_status = DW_MCI_SEND_STATUS; 588 589 if (dw_mci_submit_data_dma(host, data)) { 590 int flags = SG_MITER_ATOMIC; 591 if (host->data->flags & MMC_DATA_READ) 592 flags |= SG_MITER_TO_SG; 593 else 594 flags |= SG_MITER_FROM_SG; 595 596 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 597 host->sg = data->sg; 598 host->part_buf_start = 0; 599 host->part_buf_count = 0; 600 601 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 602 temp = mci_readl(host, INTMASK); 603 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 604 mci_writel(host, INTMASK, temp); 605 606 temp = mci_readl(host, CTRL); 607 temp &= ~SDMMC_CTRL_DMA_ENABLE; 608 mci_writel(host, CTRL, temp); 609 } 610 } 611 612 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 613 { 614 struct dw_mci *host = slot->host; 615 unsigned long timeout = jiffies + msecs_to_jiffies(500); 616 unsigned int cmd_status = 0; 617 618 mci_writel(host, CMDARG, arg); 619 wmb(); 620 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 621 622 while (time_before(jiffies, timeout)) { 623 cmd_status = mci_readl(host, CMD); 624 if (!(cmd_status & SDMMC_CMD_START)) 625 return; 626 } 627 dev_err(&slot->mmc->class_dev, 628 "Timeout sending command (cmd %#x arg %#x status %#x)\n", 629 cmd, arg, cmd_status); 630 } 631 632 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) 633 { 634 struct dw_mci *host = slot->host; 635 u32 div; 636 u32 clk_en_a; 637 638 if (slot->clock != host->current_speed || force_clkinit) { 639 div = host->bus_hz / slot->clock; 640 if (host->bus_hz % slot->clock && host->bus_hz > slot->clock) 641 /* 642 * move the + 1 after the divide to prevent 643 * over-clocking the card. 644 */ 645 div += 1; 646 647 div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0; 648 649 dev_info(&slot->mmc->class_dev, 650 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" 651 " div = %d)\n", slot->id, host->bus_hz, slot->clock, 652 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); 653 654 /* disable clock */ 655 mci_writel(host, CLKENA, 0); 656 mci_writel(host, CLKSRC, 0); 657 658 /* inform CIU */ 659 mci_send_cmd(slot, 660 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 661 662 /* set clock to desired speed */ 663 mci_writel(host, CLKDIV, div); 664 665 /* inform CIU */ 666 mci_send_cmd(slot, 667 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 668 669 /* enable clock; only low power if no SDIO */ 670 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 671 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) 672 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 673 mci_writel(host, CLKENA, clk_en_a); 674 675 /* inform CIU */ 676 mci_send_cmd(slot, 677 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 678 679 host->current_speed = slot->clock; 680 } 681 682 /* Set the current slot bus width */ 683 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 684 } 685 686 static void __dw_mci_start_request(struct dw_mci *host, 687 struct dw_mci_slot *slot, 688 struct mmc_command *cmd) 689 { 690 struct mmc_request *mrq; 691 struct mmc_data *data; 692 u32 cmdflags; 693 694 mrq = slot->mrq; 695 if (host->pdata->select_slot) 696 host->pdata->select_slot(slot->id); 697 698 host->cur_slot = slot; 699 host->mrq = mrq; 700 701 host->pending_events = 0; 702 host->completed_events = 0; 703 host->data_status = 0; 704 705 data = cmd->data; 706 if (data) { 707 dw_mci_set_timeout(host); 708 mci_writel(host, BYTCNT, data->blksz*data->blocks); 709 mci_writel(host, BLKSIZ, data->blksz); 710 } 711 712 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 713 714 /* this is the first command, send the initialization clock */ 715 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 716 cmdflags |= SDMMC_CMD_INIT; 717 718 if (data) { 719 dw_mci_submit_data(host, data); 720 wmb(); 721 } 722 723 dw_mci_start_command(host, cmd, cmdflags); 724 725 if (mrq->stop) 726 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); 727 } 728 729 static void dw_mci_start_request(struct dw_mci *host, 730 struct dw_mci_slot *slot) 731 { 732 struct mmc_request *mrq = slot->mrq; 733 struct mmc_command *cmd; 734 735 cmd = mrq->sbc ? mrq->sbc : mrq->cmd; 736 __dw_mci_start_request(host, slot, cmd); 737 } 738 739 /* must be called with host->lock held */ 740 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 741 struct mmc_request *mrq) 742 { 743 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 744 host->state); 745 746 slot->mrq = mrq; 747 748 if (host->state == STATE_IDLE) { 749 host->state = STATE_SENDING_CMD; 750 dw_mci_start_request(host, slot); 751 } else { 752 list_add_tail(&slot->queue_node, &host->queue); 753 } 754 } 755 756 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 757 { 758 struct dw_mci_slot *slot = mmc_priv(mmc); 759 struct dw_mci *host = slot->host; 760 761 WARN_ON(slot->mrq); 762 763 /* 764 * The check for card presence and queueing of the request must be 765 * atomic, otherwise the card could be removed in between and the 766 * request wouldn't fail until another card was inserted. 767 */ 768 spin_lock_bh(&host->lock); 769 770 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 771 spin_unlock_bh(&host->lock); 772 mrq->cmd->error = -ENOMEDIUM; 773 mmc_request_done(mmc, mrq); 774 return; 775 } 776 777 dw_mci_queue_request(host, slot, mrq); 778 779 spin_unlock_bh(&host->lock); 780 } 781 782 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 783 { 784 struct dw_mci_slot *slot = mmc_priv(mmc); 785 const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 786 u32 regs; 787 788 switch (ios->bus_width) { 789 case MMC_BUS_WIDTH_4: 790 slot->ctype = SDMMC_CTYPE_4BIT; 791 break; 792 case MMC_BUS_WIDTH_8: 793 slot->ctype = SDMMC_CTYPE_8BIT; 794 break; 795 default: 796 /* set default 1 bit mode */ 797 slot->ctype = SDMMC_CTYPE_1BIT; 798 } 799 800 regs = mci_readl(slot->host, UHS_REG); 801 802 /* DDR mode set */ 803 if (ios->timing == MMC_TIMING_UHS_DDR50) 804 regs |= ((0x1 << slot->id) << 16); 805 else 806 regs &= ~((0x1 << slot->id) << 16); 807 808 mci_writel(slot->host, UHS_REG, regs); 809 810 if (ios->clock) { 811 /* 812 * Use mirror of ios->clock to prevent race with mmc 813 * core ios update when finding the minimum. 814 */ 815 slot->clock = ios->clock; 816 } 817 818 if (drv_data && drv_data->set_ios) 819 drv_data->set_ios(slot->host, ios); 820 821 /* Slot specific timing and width adjustment */ 822 dw_mci_setup_bus(slot, false); 823 824 switch (ios->power_mode) { 825 case MMC_POWER_UP: 826 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 827 /* Power up slot */ 828 if (slot->host->pdata->setpower) 829 slot->host->pdata->setpower(slot->id, mmc->ocr_avail); 830 regs = mci_readl(slot->host, PWREN); 831 regs |= (1 << slot->id); 832 mci_writel(slot->host, PWREN, regs); 833 break; 834 case MMC_POWER_OFF: 835 /* Power down slot */ 836 if (slot->host->pdata->setpower) 837 slot->host->pdata->setpower(slot->id, 0); 838 regs = mci_readl(slot->host, PWREN); 839 regs &= ~(1 << slot->id); 840 mci_writel(slot->host, PWREN, regs); 841 break; 842 default: 843 break; 844 } 845 } 846 847 static int dw_mci_get_ro(struct mmc_host *mmc) 848 { 849 int read_only; 850 struct dw_mci_slot *slot = mmc_priv(mmc); 851 struct dw_mci_board *brd = slot->host->pdata; 852 853 /* Use platform get_ro function, else try on board write protect */ 854 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) 855 read_only = 0; 856 else if (brd->get_ro) 857 read_only = brd->get_ro(slot->id); 858 else if (gpio_is_valid(slot->wp_gpio)) 859 read_only = gpio_get_value(slot->wp_gpio); 860 else 861 read_only = 862 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 863 864 dev_dbg(&mmc->class_dev, "card is %s\n", 865 read_only ? "read-only" : "read-write"); 866 867 return read_only; 868 } 869 870 static int dw_mci_get_cd(struct mmc_host *mmc) 871 { 872 int present; 873 struct dw_mci_slot *slot = mmc_priv(mmc); 874 struct dw_mci_board *brd = slot->host->pdata; 875 876 /* Use platform get_cd function, else try onboard card detect */ 877 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) 878 present = 1; 879 else if (brd->get_cd) 880 present = !brd->get_cd(slot->id); 881 else 882 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 883 == 0 ? 1 : 0; 884 885 if (present) 886 dev_dbg(&mmc->class_dev, "card is present\n"); 887 else 888 dev_dbg(&mmc->class_dev, "card is not present\n"); 889 890 return present; 891 } 892 893 /* 894 * Disable lower power mode. 895 * 896 * Low power mode will stop the card clock when idle. According to the 897 * description of the CLKENA register we should disable low power mode 898 * for SDIO cards if we need SDIO interrupts to work. 899 * 900 * This function is fast if low power mode is already disabled. 901 */ 902 static void dw_mci_disable_low_power(struct dw_mci_slot *slot) 903 { 904 struct dw_mci *host = slot->host; 905 u32 clk_en_a; 906 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; 907 908 clk_en_a = mci_readl(host, CLKENA); 909 910 if (clk_en_a & clken_low_pwr) { 911 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); 912 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 913 SDMMC_CMD_PRV_DAT_WAIT, 0); 914 } 915 } 916 917 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 918 { 919 struct dw_mci_slot *slot = mmc_priv(mmc); 920 struct dw_mci *host = slot->host; 921 u32 int_mask; 922 923 /* Enable/disable Slot Specific SDIO interrupt */ 924 int_mask = mci_readl(host, INTMASK); 925 if (enb) { 926 /* 927 * Turn off low power mode if it was enabled. This is a bit of 928 * a heavy operation and we disable / enable IRQs a lot, so 929 * we'll leave low power mode disabled and it will get 930 * re-enabled again in dw_mci_setup_bus(). 931 */ 932 dw_mci_disable_low_power(slot); 933 934 mci_writel(host, INTMASK, 935 (int_mask | SDMMC_INT_SDIO(slot->id))); 936 } else { 937 mci_writel(host, INTMASK, 938 (int_mask & ~SDMMC_INT_SDIO(slot->id))); 939 } 940 } 941 942 static const struct mmc_host_ops dw_mci_ops = { 943 .request = dw_mci_request, 944 .pre_req = dw_mci_pre_req, 945 .post_req = dw_mci_post_req, 946 .set_ios = dw_mci_set_ios, 947 .get_ro = dw_mci_get_ro, 948 .get_cd = dw_mci_get_cd, 949 .enable_sdio_irq = dw_mci_enable_sdio_irq, 950 }; 951 952 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 953 __releases(&host->lock) 954 __acquires(&host->lock) 955 { 956 struct dw_mci_slot *slot; 957 struct mmc_host *prev_mmc = host->cur_slot->mmc; 958 959 WARN_ON(host->cmd || host->data); 960 961 host->cur_slot->mrq = NULL; 962 host->mrq = NULL; 963 if (!list_empty(&host->queue)) { 964 slot = list_entry(host->queue.next, 965 struct dw_mci_slot, queue_node); 966 list_del(&slot->queue_node); 967 dev_vdbg(host->dev, "list not empty: %s is next\n", 968 mmc_hostname(slot->mmc)); 969 host->state = STATE_SENDING_CMD; 970 dw_mci_start_request(host, slot); 971 } else { 972 dev_vdbg(host->dev, "list empty\n"); 973 host->state = STATE_IDLE; 974 } 975 976 spin_unlock(&host->lock); 977 mmc_request_done(prev_mmc, mrq); 978 spin_lock(&host->lock); 979 } 980 981 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 982 { 983 u32 status = host->cmd_status; 984 985 host->cmd_status = 0; 986 987 /* Read the response from the card (up to 16 bytes) */ 988 if (cmd->flags & MMC_RSP_PRESENT) { 989 if (cmd->flags & MMC_RSP_136) { 990 cmd->resp[3] = mci_readl(host, RESP0); 991 cmd->resp[2] = mci_readl(host, RESP1); 992 cmd->resp[1] = mci_readl(host, RESP2); 993 cmd->resp[0] = mci_readl(host, RESP3); 994 } else { 995 cmd->resp[0] = mci_readl(host, RESP0); 996 cmd->resp[1] = 0; 997 cmd->resp[2] = 0; 998 cmd->resp[3] = 0; 999 } 1000 } 1001 1002 if (status & SDMMC_INT_RTO) 1003 cmd->error = -ETIMEDOUT; 1004 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 1005 cmd->error = -EILSEQ; 1006 else if (status & SDMMC_INT_RESP_ERR) 1007 cmd->error = -EIO; 1008 else 1009 cmd->error = 0; 1010 1011 if (cmd->error) { 1012 /* newer ip versions need a delay between retries */ 1013 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) 1014 mdelay(20); 1015 1016 if (cmd->data) { 1017 dw_mci_stop_dma(host); 1018 host->data = NULL; 1019 } 1020 } 1021 } 1022 1023 static void dw_mci_tasklet_func(unsigned long priv) 1024 { 1025 struct dw_mci *host = (struct dw_mci *)priv; 1026 struct mmc_data *data; 1027 struct mmc_command *cmd; 1028 enum dw_mci_state state; 1029 enum dw_mci_state prev_state; 1030 u32 status, ctrl; 1031 1032 spin_lock(&host->lock); 1033 1034 state = host->state; 1035 data = host->data; 1036 1037 do { 1038 prev_state = state; 1039 1040 switch (state) { 1041 case STATE_IDLE: 1042 break; 1043 1044 case STATE_SENDING_CMD: 1045 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1046 &host->pending_events)) 1047 break; 1048 1049 cmd = host->cmd; 1050 host->cmd = NULL; 1051 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 1052 dw_mci_command_complete(host, cmd); 1053 if (cmd == host->mrq->sbc && !cmd->error) { 1054 prev_state = state = STATE_SENDING_CMD; 1055 __dw_mci_start_request(host, host->cur_slot, 1056 host->mrq->cmd); 1057 goto unlock; 1058 } 1059 1060 if (!host->mrq->data || cmd->error) { 1061 dw_mci_request_end(host, host->mrq); 1062 goto unlock; 1063 } 1064 1065 prev_state = state = STATE_SENDING_DATA; 1066 /* fall through */ 1067 1068 case STATE_SENDING_DATA: 1069 if (test_and_clear_bit(EVENT_DATA_ERROR, 1070 &host->pending_events)) { 1071 dw_mci_stop_dma(host); 1072 if (data->stop) 1073 send_stop_cmd(host, data); 1074 state = STATE_DATA_ERROR; 1075 break; 1076 } 1077 1078 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1079 &host->pending_events)) 1080 break; 1081 1082 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 1083 prev_state = state = STATE_DATA_BUSY; 1084 /* fall through */ 1085 1086 case STATE_DATA_BUSY: 1087 if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 1088 &host->pending_events)) 1089 break; 1090 1091 host->data = NULL; 1092 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 1093 status = host->data_status; 1094 1095 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1096 if (status & SDMMC_INT_DRTO) { 1097 data->error = -ETIMEDOUT; 1098 } else if (status & SDMMC_INT_DCRC) { 1099 data->error = -EILSEQ; 1100 } else if (status & SDMMC_INT_EBE && 1101 host->dir_status == 1102 DW_MCI_SEND_STATUS) { 1103 /* 1104 * No data CRC status was returned. 1105 * The number of bytes transferred will 1106 * be exaggerated in PIO mode. 1107 */ 1108 data->bytes_xfered = 0; 1109 data->error = -ETIMEDOUT; 1110 } else { 1111 dev_err(host->dev, 1112 "data FIFO error " 1113 "(status=%08x)\n", 1114 status); 1115 data->error = -EIO; 1116 } 1117 /* 1118 * After an error, there may be data lingering 1119 * in the FIFO, so reset it - doing so 1120 * generates a block interrupt, hence setting 1121 * the scatter-gather pointer to NULL. 1122 */ 1123 sg_miter_stop(&host->sg_miter); 1124 host->sg = NULL; 1125 ctrl = mci_readl(host, CTRL); 1126 ctrl |= SDMMC_CTRL_FIFO_RESET; 1127 mci_writel(host, CTRL, ctrl); 1128 } else { 1129 data->bytes_xfered = data->blocks * data->blksz; 1130 data->error = 0; 1131 } 1132 1133 if (!data->stop) { 1134 dw_mci_request_end(host, host->mrq); 1135 goto unlock; 1136 } 1137 1138 if (host->mrq->sbc && !data->error) { 1139 data->stop->error = 0; 1140 dw_mci_request_end(host, host->mrq); 1141 goto unlock; 1142 } 1143 1144 prev_state = state = STATE_SENDING_STOP; 1145 if (!data->error) 1146 send_stop_cmd(host, data); 1147 /* fall through */ 1148 1149 case STATE_SENDING_STOP: 1150 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1151 &host->pending_events)) 1152 break; 1153 1154 host->cmd = NULL; 1155 dw_mci_command_complete(host, host->mrq->stop); 1156 dw_mci_request_end(host, host->mrq); 1157 goto unlock; 1158 1159 case STATE_DATA_ERROR: 1160 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1161 &host->pending_events)) 1162 break; 1163 1164 state = STATE_DATA_BUSY; 1165 break; 1166 } 1167 } while (state != prev_state); 1168 1169 host->state = state; 1170 unlock: 1171 spin_unlock(&host->lock); 1172 1173 } 1174 1175 /* push final bytes to part_buf, only use during push */ 1176 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 1177 { 1178 memcpy((void *)&host->part_buf, buf, cnt); 1179 host->part_buf_count = cnt; 1180 } 1181 1182 /* append bytes to part_buf, only use during push */ 1183 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 1184 { 1185 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 1186 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 1187 host->part_buf_count += cnt; 1188 return cnt; 1189 } 1190 1191 /* pull first bytes from part_buf, only use during pull */ 1192 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 1193 { 1194 cnt = min(cnt, (int)host->part_buf_count); 1195 if (cnt) { 1196 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 1197 cnt); 1198 host->part_buf_count -= cnt; 1199 host->part_buf_start += cnt; 1200 } 1201 return cnt; 1202 } 1203 1204 /* pull final bytes from the part_buf, assuming it's just been filled */ 1205 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 1206 { 1207 memcpy(buf, &host->part_buf, cnt); 1208 host->part_buf_start = cnt; 1209 host->part_buf_count = (1 << host->data_shift) - cnt; 1210 } 1211 1212 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 1213 { 1214 struct mmc_data *data = host->data; 1215 int init_cnt = cnt; 1216 1217 /* try and push anything in the part_buf */ 1218 if (unlikely(host->part_buf_count)) { 1219 int len = dw_mci_push_part_bytes(host, buf, cnt); 1220 buf += len; 1221 cnt -= len; 1222 if (host->part_buf_count == 2) { 1223 mci_writew(host, DATA(host->data_offset), 1224 host->part_buf16); 1225 host->part_buf_count = 0; 1226 } 1227 } 1228 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1229 if (unlikely((unsigned long)buf & 0x1)) { 1230 while (cnt >= 2) { 1231 u16 aligned_buf[64]; 1232 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1233 int items = len >> 1; 1234 int i; 1235 /* memcpy from input buffer into aligned buffer */ 1236 memcpy(aligned_buf, buf, len); 1237 buf += len; 1238 cnt -= len; 1239 /* push data from aligned buffer into fifo */ 1240 for (i = 0; i < items; ++i) 1241 mci_writew(host, DATA(host->data_offset), 1242 aligned_buf[i]); 1243 } 1244 } else 1245 #endif 1246 { 1247 u16 *pdata = buf; 1248 for (; cnt >= 2; cnt -= 2) 1249 mci_writew(host, DATA(host->data_offset), *pdata++); 1250 buf = pdata; 1251 } 1252 /* put anything remaining in the part_buf */ 1253 if (cnt) { 1254 dw_mci_set_part_bytes(host, buf, cnt); 1255 /* Push data if we have reached the expected data length */ 1256 if ((data->bytes_xfered + init_cnt) == 1257 (data->blksz * data->blocks)) 1258 mci_writew(host, DATA(host->data_offset), 1259 host->part_buf16); 1260 } 1261 } 1262 1263 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 1264 { 1265 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1266 if (unlikely((unsigned long)buf & 0x1)) { 1267 while (cnt >= 2) { 1268 /* pull data from fifo into aligned buffer */ 1269 u16 aligned_buf[64]; 1270 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1271 int items = len >> 1; 1272 int i; 1273 for (i = 0; i < items; ++i) 1274 aligned_buf[i] = mci_readw(host, 1275 DATA(host->data_offset)); 1276 /* memcpy from aligned buffer into output buffer */ 1277 memcpy(buf, aligned_buf, len); 1278 buf += len; 1279 cnt -= len; 1280 } 1281 } else 1282 #endif 1283 { 1284 u16 *pdata = buf; 1285 for (; cnt >= 2; cnt -= 2) 1286 *pdata++ = mci_readw(host, DATA(host->data_offset)); 1287 buf = pdata; 1288 } 1289 if (cnt) { 1290 host->part_buf16 = mci_readw(host, DATA(host->data_offset)); 1291 dw_mci_pull_final_bytes(host, buf, cnt); 1292 } 1293 } 1294 1295 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 1296 { 1297 struct mmc_data *data = host->data; 1298 int init_cnt = cnt; 1299 1300 /* try and push anything in the part_buf */ 1301 if (unlikely(host->part_buf_count)) { 1302 int len = dw_mci_push_part_bytes(host, buf, cnt); 1303 buf += len; 1304 cnt -= len; 1305 if (host->part_buf_count == 4) { 1306 mci_writel(host, DATA(host->data_offset), 1307 host->part_buf32); 1308 host->part_buf_count = 0; 1309 } 1310 } 1311 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1312 if (unlikely((unsigned long)buf & 0x3)) { 1313 while (cnt >= 4) { 1314 u32 aligned_buf[32]; 1315 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1316 int items = len >> 2; 1317 int i; 1318 /* memcpy from input buffer into aligned buffer */ 1319 memcpy(aligned_buf, buf, len); 1320 buf += len; 1321 cnt -= len; 1322 /* push data from aligned buffer into fifo */ 1323 for (i = 0; i < items; ++i) 1324 mci_writel(host, DATA(host->data_offset), 1325 aligned_buf[i]); 1326 } 1327 } else 1328 #endif 1329 { 1330 u32 *pdata = buf; 1331 for (; cnt >= 4; cnt -= 4) 1332 mci_writel(host, DATA(host->data_offset), *pdata++); 1333 buf = pdata; 1334 } 1335 /* put anything remaining in the part_buf */ 1336 if (cnt) { 1337 dw_mci_set_part_bytes(host, buf, cnt); 1338 /* Push data if we have reached the expected data length */ 1339 if ((data->bytes_xfered + init_cnt) == 1340 (data->blksz * data->blocks)) 1341 mci_writel(host, DATA(host->data_offset), 1342 host->part_buf32); 1343 } 1344 } 1345 1346 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 1347 { 1348 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1349 if (unlikely((unsigned long)buf & 0x3)) { 1350 while (cnt >= 4) { 1351 /* pull data from fifo into aligned buffer */ 1352 u32 aligned_buf[32]; 1353 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1354 int items = len >> 2; 1355 int i; 1356 for (i = 0; i < items; ++i) 1357 aligned_buf[i] = mci_readl(host, 1358 DATA(host->data_offset)); 1359 /* memcpy from aligned buffer into output buffer */ 1360 memcpy(buf, aligned_buf, len); 1361 buf += len; 1362 cnt -= len; 1363 } 1364 } else 1365 #endif 1366 { 1367 u32 *pdata = buf; 1368 for (; cnt >= 4; cnt -= 4) 1369 *pdata++ = mci_readl(host, DATA(host->data_offset)); 1370 buf = pdata; 1371 } 1372 if (cnt) { 1373 host->part_buf32 = mci_readl(host, DATA(host->data_offset)); 1374 dw_mci_pull_final_bytes(host, buf, cnt); 1375 } 1376 } 1377 1378 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 1379 { 1380 struct mmc_data *data = host->data; 1381 int init_cnt = cnt; 1382 1383 /* try and push anything in the part_buf */ 1384 if (unlikely(host->part_buf_count)) { 1385 int len = dw_mci_push_part_bytes(host, buf, cnt); 1386 buf += len; 1387 cnt -= len; 1388 1389 if (host->part_buf_count == 8) { 1390 mci_writeq(host, DATA(host->data_offset), 1391 host->part_buf); 1392 host->part_buf_count = 0; 1393 } 1394 } 1395 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1396 if (unlikely((unsigned long)buf & 0x7)) { 1397 while (cnt >= 8) { 1398 u64 aligned_buf[16]; 1399 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1400 int items = len >> 3; 1401 int i; 1402 /* memcpy from input buffer into aligned buffer */ 1403 memcpy(aligned_buf, buf, len); 1404 buf += len; 1405 cnt -= len; 1406 /* push data from aligned buffer into fifo */ 1407 for (i = 0; i < items; ++i) 1408 mci_writeq(host, DATA(host->data_offset), 1409 aligned_buf[i]); 1410 } 1411 } else 1412 #endif 1413 { 1414 u64 *pdata = buf; 1415 for (; cnt >= 8; cnt -= 8) 1416 mci_writeq(host, DATA(host->data_offset), *pdata++); 1417 buf = pdata; 1418 } 1419 /* put anything remaining in the part_buf */ 1420 if (cnt) { 1421 dw_mci_set_part_bytes(host, buf, cnt); 1422 /* Push data if we have reached the expected data length */ 1423 if ((data->bytes_xfered + init_cnt) == 1424 (data->blksz * data->blocks)) 1425 mci_writeq(host, DATA(host->data_offset), 1426 host->part_buf); 1427 } 1428 } 1429 1430 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 1431 { 1432 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1433 if (unlikely((unsigned long)buf & 0x7)) { 1434 while (cnt >= 8) { 1435 /* pull data from fifo into aligned buffer */ 1436 u64 aligned_buf[16]; 1437 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1438 int items = len >> 3; 1439 int i; 1440 for (i = 0; i < items; ++i) 1441 aligned_buf[i] = mci_readq(host, 1442 DATA(host->data_offset)); 1443 /* memcpy from aligned buffer into output buffer */ 1444 memcpy(buf, aligned_buf, len); 1445 buf += len; 1446 cnt -= len; 1447 } 1448 } else 1449 #endif 1450 { 1451 u64 *pdata = buf; 1452 for (; cnt >= 8; cnt -= 8) 1453 *pdata++ = mci_readq(host, DATA(host->data_offset)); 1454 buf = pdata; 1455 } 1456 if (cnt) { 1457 host->part_buf = mci_readq(host, DATA(host->data_offset)); 1458 dw_mci_pull_final_bytes(host, buf, cnt); 1459 } 1460 } 1461 1462 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 1463 { 1464 int len; 1465 1466 /* get remaining partial bytes */ 1467 len = dw_mci_pull_part_bytes(host, buf, cnt); 1468 if (unlikely(len == cnt)) 1469 return; 1470 buf += len; 1471 cnt -= len; 1472 1473 /* get the rest of the data */ 1474 host->pull_data(host, buf, cnt); 1475 } 1476 1477 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 1478 { 1479 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1480 void *buf; 1481 unsigned int offset; 1482 struct mmc_data *data = host->data; 1483 int shift = host->data_shift; 1484 u32 status; 1485 unsigned int len; 1486 unsigned int remain, fcnt; 1487 1488 do { 1489 if (!sg_miter_next(sg_miter)) 1490 goto done; 1491 1492 host->sg = sg_miter->piter.sg; 1493 buf = sg_miter->addr; 1494 remain = sg_miter->length; 1495 offset = 0; 1496 1497 do { 1498 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 1499 << shift) + host->part_buf_count; 1500 len = min(remain, fcnt); 1501 if (!len) 1502 break; 1503 dw_mci_pull_data(host, (void *)(buf + offset), len); 1504 data->bytes_xfered += len; 1505 offset += len; 1506 remain -= len; 1507 } while (remain); 1508 1509 sg_miter->consumed = offset; 1510 status = mci_readl(host, MINTSTS); 1511 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1512 /* if the RXDR is ready read again */ 1513 } while ((status & SDMMC_INT_RXDR) || 1514 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 1515 1516 if (!remain) { 1517 if (!sg_miter_next(sg_miter)) 1518 goto done; 1519 sg_miter->consumed = 0; 1520 } 1521 sg_miter_stop(sg_miter); 1522 return; 1523 1524 done: 1525 sg_miter_stop(sg_miter); 1526 host->sg = NULL; 1527 smp_wmb(); 1528 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1529 } 1530 1531 static void dw_mci_write_data_pio(struct dw_mci *host) 1532 { 1533 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1534 void *buf; 1535 unsigned int offset; 1536 struct mmc_data *data = host->data; 1537 int shift = host->data_shift; 1538 u32 status; 1539 unsigned int len; 1540 unsigned int fifo_depth = host->fifo_depth; 1541 unsigned int remain, fcnt; 1542 1543 do { 1544 if (!sg_miter_next(sg_miter)) 1545 goto done; 1546 1547 host->sg = sg_miter->piter.sg; 1548 buf = sg_miter->addr; 1549 remain = sg_miter->length; 1550 offset = 0; 1551 1552 do { 1553 fcnt = ((fifo_depth - 1554 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 1555 << shift) - host->part_buf_count; 1556 len = min(remain, fcnt); 1557 if (!len) 1558 break; 1559 host->push_data(host, (void *)(buf + offset), len); 1560 data->bytes_xfered += len; 1561 offset += len; 1562 remain -= len; 1563 } while (remain); 1564 1565 sg_miter->consumed = offset; 1566 status = mci_readl(host, MINTSTS); 1567 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1568 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1569 1570 if (!remain) { 1571 if (!sg_miter_next(sg_miter)) 1572 goto done; 1573 sg_miter->consumed = 0; 1574 } 1575 sg_miter_stop(sg_miter); 1576 return; 1577 1578 done: 1579 sg_miter_stop(sg_miter); 1580 host->sg = NULL; 1581 smp_wmb(); 1582 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1583 } 1584 1585 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 1586 { 1587 if (!host->cmd_status) 1588 host->cmd_status = status; 1589 1590 smp_wmb(); 1591 1592 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1593 tasklet_schedule(&host->tasklet); 1594 } 1595 1596 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 1597 { 1598 struct dw_mci *host = dev_id; 1599 u32 pending; 1600 int i; 1601 1602 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 1603 1604 /* 1605 * DTO fix - version 2.10a and below, and only if internal DMA 1606 * is configured. 1607 */ 1608 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { 1609 if (!pending && 1610 ((mci_readl(host, STATUS) >> 17) & 0x1fff)) 1611 pending |= SDMMC_INT_DATA_OVER; 1612 } 1613 1614 if (pending) { 1615 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 1616 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 1617 host->cmd_status = pending; 1618 smp_wmb(); 1619 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1620 } 1621 1622 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1623 /* if there is an error report DATA_ERROR */ 1624 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 1625 host->data_status = pending; 1626 smp_wmb(); 1627 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1628 tasklet_schedule(&host->tasklet); 1629 } 1630 1631 if (pending & SDMMC_INT_DATA_OVER) { 1632 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 1633 if (!host->data_status) 1634 host->data_status = pending; 1635 smp_wmb(); 1636 if (host->dir_status == DW_MCI_RECV_STATUS) { 1637 if (host->sg != NULL) 1638 dw_mci_read_data_pio(host, true); 1639 } 1640 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 1641 tasklet_schedule(&host->tasklet); 1642 } 1643 1644 if (pending & SDMMC_INT_RXDR) { 1645 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1646 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 1647 dw_mci_read_data_pio(host, false); 1648 } 1649 1650 if (pending & SDMMC_INT_TXDR) { 1651 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1652 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 1653 dw_mci_write_data_pio(host); 1654 } 1655 1656 if (pending & SDMMC_INT_CMD_DONE) { 1657 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 1658 dw_mci_cmd_interrupt(host, pending); 1659 } 1660 1661 if (pending & SDMMC_INT_CD) { 1662 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1663 queue_work(host->card_workqueue, &host->card_work); 1664 } 1665 1666 /* Handle SDIO Interrupts */ 1667 for (i = 0; i < host->num_slots; i++) { 1668 struct dw_mci_slot *slot = host->slot[i]; 1669 if (pending & SDMMC_INT_SDIO(i)) { 1670 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); 1671 mmc_signal_sdio_irq(slot->mmc); 1672 } 1673 } 1674 1675 } 1676 1677 #ifdef CONFIG_MMC_DW_IDMAC 1678 /* Handle DMA interrupts */ 1679 pending = mci_readl(host, IDSTS); 1680 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 1681 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 1682 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 1683 host->dma_ops->complete(host); 1684 } 1685 #endif 1686 1687 return IRQ_HANDLED; 1688 } 1689 1690 static void dw_mci_work_routine_card(struct work_struct *work) 1691 { 1692 struct dw_mci *host = container_of(work, struct dw_mci, card_work); 1693 int i; 1694 1695 for (i = 0; i < host->num_slots; i++) { 1696 struct dw_mci_slot *slot = host->slot[i]; 1697 struct mmc_host *mmc = slot->mmc; 1698 struct mmc_request *mrq; 1699 int present; 1700 u32 ctrl; 1701 1702 present = dw_mci_get_cd(mmc); 1703 while (present != slot->last_detect_state) { 1704 dev_dbg(&slot->mmc->class_dev, "card %s\n", 1705 present ? "inserted" : "removed"); 1706 1707 spin_lock_bh(&host->lock); 1708 1709 /* Card change detected */ 1710 slot->last_detect_state = present; 1711 1712 /* Mark card as present if applicable */ 1713 if (present != 0) 1714 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1715 1716 /* Clean up queue if present */ 1717 mrq = slot->mrq; 1718 if (mrq) { 1719 if (mrq == host->mrq) { 1720 host->data = NULL; 1721 host->cmd = NULL; 1722 1723 switch (host->state) { 1724 case STATE_IDLE: 1725 break; 1726 case STATE_SENDING_CMD: 1727 mrq->cmd->error = -ENOMEDIUM; 1728 if (!mrq->data) 1729 break; 1730 /* fall through */ 1731 case STATE_SENDING_DATA: 1732 mrq->data->error = -ENOMEDIUM; 1733 dw_mci_stop_dma(host); 1734 break; 1735 case STATE_DATA_BUSY: 1736 case STATE_DATA_ERROR: 1737 if (mrq->data->error == -EINPROGRESS) 1738 mrq->data->error = -ENOMEDIUM; 1739 if (!mrq->stop) 1740 break; 1741 /* fall through */ 1742 case STATE_SENDING_STOP: 1743 mrq->stop->error = -ENOMEDIUM; 1744 break; 1745 } 1746 1747 dw_mci_request_end(host, mrq); 1748 } else { 1749 list_del(&slot->queue_node); 1750 mrq->cmd->error = -ENOMEDIUM; 1751 if (mrq->data) 1752 mrq->data->error = -ENOMEDIUM; 1753 if (mrq->stop) 1754 mrq->stop->error = -ENOMEDIUM; 1755 1756 spin_unlock(&host->lock); 1757 mmc_request_done(slot->mmc, mrq); 1758 spin_lock(&host->lock); 1759 } 1760 } 1761 1762 /* Power down slot */ 1763 if (present == 0) { 1764 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1765 1766 /* 1767 * Clear down the FIFO - doing so generates a 1768 * block interrupt, hence setting the 1769 * scatter-gather pointer to NULL. 1770 */ 1771 sg_miter_stop(&host->sg_miter); 1772 host->sg = NULL; 1773 1774 ctrl = mci_readl(host, CTRL); 1775 ctrl |= SDMMC_CTRL_FIFO_RESET; 1776 mci_writel(host, CTRL, ctrl); 1777 1778 #ifdef CONFIG_MMC_DW_IDMAC 1779 ctrl = mci_readl(host, BMOD); 1780 /* Software reset of DMA */ 1781 ctrl |= SDMMC_IDMAC_SWRESET; 1782 mci_writel(host, BMOD, ctrl); 1783 #endif 1784 1785 } 1786 1787 spin_unlock_bh(&host->lock); 1788 1789 present = dw_mci_get_cd(mmc); 1790 } 1791 1792 mmc_detect_change(slot->mmc, 1793 msecs_to_jiffies(host->pdata->detect_delay_ms)); 1794 } 1795 } 1796 1797 #ifdef CONFIG_OF 1798 /* given a slot id, find out the device node representing that slot */ 1799 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) 1800 { 1801 struct device_node *np; 1802 const __be32 *addr; 1803 int len; 1804 1805 if (!dev || !dev->of_node) 1806 return NULL; 1807 1808 for_each_child_of_node(dev->of_node, np) { 1809 addr = of_get_property(np, "reg", &len); 1810 if (!addr || (len < sizeof(int))) 1811 continue; 1812 if (be32_to_cpup(addr) == slot) 1813 return np; 1814 } 1815 return NULL; 1816 } 1817 1818 static struct dw_mci_of_slot_quirks { 1819 char *quirk; 1820 int id; 1821 } of_slot_quirks[] = { 1822 { 1823 .quirk = "disable-wp", 1824 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT, 1825 }, 1826 }; 1827 1828 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) 1829 { 1830 struct device_node *np = dw_mci_of_find_slot_node(dev, slot); 1831 int quirks = 0; 1832 int idx; 1833 1834 /* get quirks */ 1835 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++) 1836 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) 1837 quirks |= of_slot_quirks[idx].id; 1838 1839 return quirks; 1840 } 1841 1842 /* find out bus-width for a given slot */ 1843 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) 1844 { 1845 struct device_node *np = dw_mci_of_find_slot_node(dev, slot); 1846 u32 bus_wd = 1; 1847 1848 if (!np) 1849 return 1; 1850 1851 if (of_property_read_u32(np, "bus-width", &bus_wd)) 1852 dev_err(dev, "bus-width property not found, assuming width" 1853 " as 1\n"); 1854 return bus_wd; 1855 } 1856 1857 /* find the write protect gpio for a given slot; or -1 if none specified */ 1858 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot) 1859 { 1860 struct device_node *np = dw_mci_of_find_slot_node(dev, slot); 1861 int gpio; 1862 1863 if (!np) 1864 return -EINVAL; 1865 1866 gpio = of_get_named_gpio(np, "wp-gpios", 0); 1867 1868 /* Having a missing entry is valid; return silently */ 1869 if (!gpio_is_valid(gpio)) 1870 return -EINVAL; 1871 1872 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) { 1873 dev_warn(dev, "gpio [%d] request failed\n", gpio); 1874 return -EINVAL; 1875 } 1876 1877 return gpio; 1878 } 1879 #else /* CONFIG_OF */ 1880 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) 1881 { 1882 return 0; 1883 } 1884 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) 1885 { 1886 return 1; 1887 } 1888 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) 1889 { 1890 return NULL; 1891 } 1892 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot) 1893 { 1894 return -EINVAL; 1895 } 1896 #endif /* CONFIG_OF */ 1897 1898 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) 1899 { 1900 struct mmc_host *mmc; 1901 struct dw_mci_slot *slot; 1902 const struct dw_mci_drv_data *drv_data = host->drv_data; 1903 int ctrl_id, ret; 1904 u8 bus_width; 1905 1906 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 1907 if (!mmc) 1908 return -ENOMEM; 1909 1910 slot = mmc_priv(mmc); 1911 slot->id = id; 1912 slot->mmc = mmc; 1913 slot->host = host; 1914 host->slot[id] = slot; 1915 1916 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); 1917 1918 mmc->ops = &dw_mci_ops; 1919 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); 1920 mmc->f_max = host->bus_hz; 1921 1922 if (host->pdata->get_ocr) 1923 mmc->ocr_avail = host->pdata->get_ocr(id); 1924 else 1925 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1926 1927 /* 1928 * Start with slot power disabled, it will be enabled when a card 1929 * is detected. 1930 */ 1931 if (host->pdata->setpower) 1932 host->pdata->setpower(id, 0); 1933 1934 if (host->pdata->caps) 1935 mmc->caps = host->pdata->caps; 1936 1937 if (host->pdata->pm_caps) 1938 mmc->pm_caps = host->pdata->pm_caps; 1939 1940 if (host->dev->of_node) { 1941 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 1942 if (ctrl_id < 0) 1943 ctrl_id = 0; 1944 } else { 1945 ctrl_id = to_platform_device(host->dev)->id; 1946 } 1947 if (drv_data && drv_data->caps) 1948 mmc->caps |= drv_data->caps[ctrl_id]; 1949 1950 if (host->pdata->caps2) 1951 mmc->caps2 = host->pdata->caps2; 1952 1953 if (host->pdata->get_bus_wd) 1954 bus_width = host->pdata->get_bus_wd(slot->id); 1955 else if (host->dev->of_node) 1956 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id); 1957 else 1958 bus_width = 1; 1959 1960 switch (bus_width) { 1961 case 8: 1962 mmc->caps |= MMC_CAP_8_BIT_DATA; 1963 case 4: 1964 mmc->caps |= MMC_CAP_4_BIT_DATA; 1965 } 1966 1967 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1968 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1969 1970 if (host->pdata->blk_settings) { 1971 mmc->max_segs = host->pdata->blk_settings->max_segs; 1972 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; 1973 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; 1974 mmc->max_req_size = host->pdata->blk_settings->max_req_size; 1975 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; 1976 } else { 1977 /* Useful defaults if platform data is unset. */ 1978 #ifdef CONFIG_MMC_DW_IDMAC 1979 mmc->max_segs = host->ring_size; 1980 mmc->max_blk_size = 65536; 1981 mmc->max_blk_count = host->ring_size; 1982 mmc->max_seg_size = 0x1000; 1983 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; 1984 #else 1985 mmc->max_segs = 64; 1986 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 1987 mmc->max_blk_count = 512; 1988 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1989 mmc->max_seg_size = mmc->max_req_size; 1990 #endif /* CONFIG_MMC_DW_IDMAC */ 1991 } 1992 1993 if (dw_mci_get_cd(mmc)) 1994 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1995 else 1996 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1997 1998 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id); 1999 2000 ret = mmc_add_host(mmc); 2001 if (ret) 2002 goto err_setup_bus; 2003 2004 #if defined(CONFIG_DEBUG_FS) 2005 dw_mci_init_debugfs(slot); 2006 #endif 2007 2008 /* Card initially undetected */ 2009 slot->last_detect_state = 0; 2010 2011 /* 2012 * Card may have been plugged in prior to boot so we 2013 * need to run the detect tasklet 2014 */ 2015 queue_work(host->card_workqueue, &host->card_work); 2016 2017 return 0; 2018 2019 err_setup_bus: 2020 mmc_free_host(mmc); 2021 return -EINVAL; 2022 } 2023 2024 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) 2025 { 2026 /* Shutdown detect IRQ */ 2027 if (slot->host->pdata->exit) 2028 slot->host->pdata->exit(id); 2029 2030 /* Debugfs stuff is cleaned up by mmc core */ 2031 mmc_remove_host(slot->mmc); 2032 slot->host->slot[id] = NULL; 2033 mmc_free_host(slot->mmc); 2034 } 2035 2036 static void dw_mci_init_dma(struct dw_mci *host) 2037 { 2038 /* Alloc memory for sg translation */ 2039 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2040 &host->sg_dma, GFP_KERNEL); 2041 if (!host->sg_cpu) { 2042 dev_err(host->dev, "%s: could not alloc DMA memory\n", 2043 __func__); 2044 goto no_dma; 2045 } 2046 2047 /* Determine which DMA interface to use */ 2048 #ifdef CONFIG_MMC_DW_IDMAC 2049 host->dma_ops = &dw_mci_idmac_ops; 2050 dev_info(host->dev, "Using internal DMA controller.\n"); 2051 #endif 2052 2053 if (!host->dma_ops) 2054 goto no_dma; 2055 2056 if (host->dma_ops->init && host->dma_ops->start && 2057 host->dma_ops->stop && host->dma_ops->cleanup) { 2058 if (host->dma_ops->init(host)) { 2059 dev_err(host->dev, "%s: Unable to initialize " 2060 "DMA Controller.\n", __func__); 2061 goto no_dma; 2062 } 2063 } else { 2064 dev_err(host->dev, "DMA initialization not found.\n"); 2065 goto no_dma; 2066 } 2067 2068 host->use_dma = 1; 2069 return; 2070 2071 no_dma: 2072 dev_info(host->dev, "Using PIO mode.\n"); 2073 host->use_dma = 0; 2074 return; 2075 } 2076 2077 static bool mci_wait_reset(struct device *dev, struct dw_mci *host) 2078 { 2079 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2080 unsigned int ctrl; 2081 2082 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 2083 SDMMC_CTRL_DMA_RESET)); 2084 2085 /* wait till resets clear */ 2086 do { 2087 ctrl = mci_readl(host, CTRL); 2088 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 2089 SDMMC_CTRL_DMA_RESET))) 2090 return true; 2091 } while (time_before(jiffies, timeout)); 2092 2093 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); 2094 2095 return false; 2096 } 2097 2098 #ifdef CONFIG_OF 2099 static struct dw_mci_of_quirks { 2100 char *quirk; 2101 int id; 2102 } of_quirks[] = { 2103 { 2104 .quirk = "supports-highspeed", 2105 .id = DW_MCI_QUIRK_HIGHSPEED, 2106 }, { 2107 .quirk = "broken-cd", 2108 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, 2109 }, 2110 }; 2111 2112 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2113 { 2114 struct dw_mci_board *pdata; 2115 struct device *dev = host->dev; 2116 struct device_node *np = dev->of_node; 2117 const struct dw_mci_drv_data *drv_data = host->drv_data; 2118 int idx, ret; 2119 u32 clock_frequency; 2120 2121 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 2122 if (!pdata) { 2123 dev_err(dev, "could not allocate memory for pdata\n"); 2124 return ERR_PTR(-ENOMEM); 2125 } 2126 2127 /* find out number of slots supported */ 2128 if (of_property_read_u32(dev->of_node, "num-slots", 2129 &pdata->num_slots)) { 2130 dev_info(dev, "num-slots property not found, " 2131 "assuming 1 slot is available\n"); 2132 pdata->num_slots = 1; 2133 } 2134 2135 /* get quirks */ 2136 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++) 2137 if (of_get_property(np, of_quirks[idx].quirk, NULL)) 2138 pdata->quirks |= of_quirks[idx].id; 2139 2140 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) 2141 dev_info(dev, "fifo-depth property not found, using " 2142 "value of FIFOTH register as default\n"); 2143 2144 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); 2145 2146 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency)) 2147 pdata->bus_hz = clock_frequency; 2148 2149 if (drv_data && drv_data->parse_dt) { 2150 ret = drv_data->parse_dt(host); 2151 if (ret) 2152 return ERR_PTR(ret); 2153 } 2154 2155 if (of_find_property(np, "keep-power-in-suspend", NULL)) 2156 pdata->pm_caps |= MMC_PM_KEEP_POWER; 2157 2158 if (of_find_property(np, "enable-sdio-wakeup", NULL)) 2159 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; 2160 2161 return pdata; 2162 } 2163 2164 #else /* CONFIG_OF */ 2165 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2166 { 2167 return ERR_PTR(-EINVAL); 2168 } 2169 #endif /* CONFIG_OF */ 2170 2171 int dw_mci_probe(struct dw_mci *host) 2172 { 2173 const struct dw_mci_drv_data *drv_data = host->drv_data; 2174 int width, i, ret = 0; 2175 u32 fifo_size; 2176 int init_slots = 0; 2177 2178 if (!host->pdata) { 2179 host->pdata = dw_mci_parse_dt(host); 2180 if (IS_ERR(host->pdata)) { 2181 dev_err(host->dev, "platform data not available\n"); 2182 return -EINVAL; 2183 } 2184 } 2185 2186 if (!host->pdata->select_slot && host->pdata->num_slots > 1) { 2187 dev_err(host->dev, 2188 "Platform data must supply select_slot function\n"); 2189 return -ENODEV; 2190 } 2191 2192 host->biu_clk = devm_clk_get(host->dev, "biu"); 2193 if (IS_ERR(host->biu_clk)) { 2194 dev_dbg(host->dev, "biu clock not available\n"); 2195 } else { 2196 ret = clk_prepare_enable(host->biu_clk); 2197 if (ret) { 2198 dev_err(host->dev, "failed to enable biu clock\n"); 2199 return ret; 2200 } 2201 } 2202 2203 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 2204 if (IS_ERR(host->ciu_clk)) { 2205 dev_dbg(host->dev, "ciu clock not available\n"); 2206 host->bus_hz = host->pdata->bus_hz; 2207 } else { 2208 ret = clk_prepare_enable(host->ciu_clk); 2209 if (ret) { 2210 dev_err(host->dev, "failed to enable ciu clock\n"); 2211 goto err_clk_biu; 2212 } 2213 2214 if (host->pdata->bus_hz) { 2215 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 2216 if (ret) 2217 dev_warn(host->dev, 2218 "Unable to set bus rate to %ul\n", 2219 host->pdata->bus_hz); 2220 } 2221 host->bus_hz = clk_get_rate(host->ciu_clk); 2222 } 2223 2224 if (drv_data && drv_data->setup_clock) { 2225 ret = drv_data->setup_clock(host); 2226 if (ret) { 2227 dev_err(host->dev, 2228 "implementation specific clock setup failed\n"); 2229 goto err_clk_ciu; 2230 } 2231 } 2232 2233 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc"); 2234 if (IS_ERR(host->vmmc)) { 2235 ret = PTR_ERR(host->vmmc); 2236 if (ret == -EPROBE_DEFER) 2237 goto err_clk_ciu; 2238 2239 dev_info(host->dev, "no vmmc regulator found: %d\n", ret); 2240 host->vmmc = NULL; 2241 } else { 2242 ret = regulator_enable(host->vmmc); 2243 if (ret) { 2244 if (ret != -EPROBE_DEFER) 2245 dev_err(host->dev, 2246 "regulator_enable fail: %d\n", ret); 2247 goto err_clk_ciu; 2248 } 2249 } 2250 2251 if (!host->bus_hz) { 2252 dev_err(host->dev, 2253 "Platform data must supply bus speed\n"); 2254 ret = -ENODEV; 2255 goto err_regulator; 2256 } 2257 2258 host->quirks = host->pdata->quirks; 2259 2260 spin_lock_init(&host->lock); 2261 INIT_LIST_HEAD(&host->queue); 2262 2263 /* 2264 * Get the host data width - this assumes that HCON has been set with 2265 * the correct values. 2266 */ 2267 i = (mci_readl(host, HCON) >> 7) & 0x7; 2268 if (!i) { 2269 host->push_data = dw_mci_push_data16; 2270 host->pull_data = dw_mci_pull_data16; 2271 width = 16; 2272 host->data_shift = 1; 2273 } else if (i == 2) { 2274 host->push_data = dw_mci_push_data64; 2275 host->pull_data = dw_mci_pull_data64; 2276 width = 64; 2277 host->data_shift = 3; 2278 } else { 2279 /* Check for a reserved value, and warn if it is */ 2280 WARN((i != 1), 2281 "HCON reports a reserved host data width!\n" 2282 "Defaulting to 32-bit access.\n"); 2283 host->push_data = dw_mci_push_data32; 2284 host->pull_data = dw_mci_pull_data32; 2285 width = 32; 2286 host->data_shift = 2; 2287 } 2288 2289 /* Reset all blocks */ 2290 if (!mci_wait_reset(host->dev, host)) 2291 return -ENODEV; 2292 2293 host->dma_ops = host->pdata->dma_ops; 2294 dw_mci_init_dma(host); 2295 2296 /* Clear the interrupts for the host controller */ 2297 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2298 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2299 2300 /* Put in max timeout */ 2301 mci_writel(host, TMOUT, 0xFFFFFFFF); 2302 2303 /* 2304 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 2305 * Tx Mark = fifo_size / 2 DMA Size = 8 2306 */ 2307 if (!host->pdata->fifo_depth) { 2308 /* 2309 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 2310 * have been overwritten by the bootloader, just like we're 2311 * about to do, so if you know the value for your hardware, you 2312 * should put it in the platform data. 2313 */ 2314 fifo_size = mci_readl(host, FIFOTH); 2315 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 2316 } else { 2317 fifo_size = host->pdata->fifo_depth; 2318 } 2319 host->fifo_depth = fifo_size; 2320 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | 2321 ((fifo_size/2) << 0)); 2322 mci_writel(host, FIFOTH, host->fifoth_val); 2323 2324 /* disable clock to CIU */ 2325 mci_writel(host, CLKENA, 0); 2326 mci_writel(host, CLKSRC, 0); 2327 2328 /* 2329 * In 2.40a spec, Data offset is changed. 2330 * Need to check the version-id and set data-offset for DATA register. 2331 */ 2332 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 2333 dev_info(host->dev, "Version ID is %04x\n", host->verid); 2334 2335 if (host->verid < DW_MMC_240A) 2336 host->data_offset = DATA_OFFSET; 2337 else 2338 host->data_offset = DATA_240A_OFFSET; 2339 2340 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 2341 host->card_workqueue = alloc_workqueue("dw-mci-card", 2342 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); 2343 if (!host->card_workqueue) { 2344 ret = -ENOMEM; 2345 goto err_dmaunmap; 2346 } 2347 INIT_WORK(&host->card_work, dw_mci_work_routine_card); 2348 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 2349 host->irq_flags, "dw-mci", host); 2350 if (ret) 2351 goto err_workqueue; 2352 2353 if (host->pdata->num_slots) 2354 host->num_slots = host->pdata->num_slots; 2355 else 2356 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; 2357 2358 /* 2359 * Enable interrupts for command done, data over, data empty, card det, 2360 * receive ready and error such as transmit, receive timeout, crc error 2361 */ 2362 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2363 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2364 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2365 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2366 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2367 2368 dev_info(host->dev, "DW MMC controller at irq %d, " 2369 "%d bit host data width, " 2370 "%u deep fifo\n", 2371 host->irq, width, fifo_size); 2372 2373 /* We need at least one slot to succeed */ 2374 for (i = 0; i < host->num_slots; i++) { 2375 ret = dw_mci_init_slot(host, i); 2376 if (ret) 2377 dev_dbg(host->dev, "slot %d init failed\n", i); 2378 else 2379 init_slots++; 2380 } 2381 2382 if (init_slots) { 2383 dev_info(host->dev, "%d slots initialized\n", init_slots); 2384 } else { 2385 dev_dbg(host->dev, "attempted to initialize %d slots, " 2386 "but failed on all\n", host->num_slots); 2387 goto err_workqueue; 2388 } 2389 2390 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2391 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); 2392 2393 return 0; 2394 2395 err_workqueue: 2396 destroy_workqueue(host->card_workqueue); 2397 2398 err_dmaunmap: 2399 if (host->use_dma && host->dma_ops->exit) 2400 host->dma_ops->exit(host); 2401 2402 err_regulator: 2403 if (host->vmmc) 2404 regulator_disable(host->vmmc); 2405 2406 err_clk_ciu: 2407 if (!IS_ERR(host->ciu_clk)) 2408 clk_disable_unprepare(host->ciu_clk); 2409 2410 err_clk_biu: 2411 if (!IS_ERR(host->biu_clk)) 2412 clk_disable_unprepare(host->biu_clk); 2413 2414 return ret; 2415 } 2416 EXPORT_SYMBOL(dw_mci_probe); 2417 2418 void dw_mci_remove(struct dw_mci *host) 2419 { 2420 int i; 2421 2422 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2423 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2424 2425 for (i = 0; i < host->num_slots; i++) { 2426 dev_dbg(host->dev, "remove slot %d\n", i); 2427 if (host->slot[i]) 2428 dw_mci_cleanup_slot(host->slot[i], i); 2429 } 2430 2431 /* disable clock to CIU */ 2432 mci_writel(host, CLKENA, 0); 2433 mci_writel(host, CLKSRC, 0); 2434 2435 destroy_workqueue(host->card_workqueue); 2436 2437 if (host->use_dma && host->dma_ops->exit) 2438 host->dma_ops->exit(host); 2439 2440 if (host->vmmc) 2441 regulator_disable(host->vmmc); 2442 2443 if (!IS_ERR(host->ciu_clk)) 2444 clk_disable_unprepare(host->ciu_clk); 2445 2446 if (!IS_ERR(host->biu_clk)) 2447 clk_disable_unprepare(host->biu_clk); 2448 } 2449 EXPORT_SYMBOL(dw_mci_remove); 2450 2451 2452 2453 #ifdef CONFIG_PM_SLEEP 2454 /* 2455 * TODO: we should probably disable the clock to the card in the suspend path. 2456 */ 2457 int dw_mci_suspend(struct dw_mci *host) 2458 { 2459 int i, ret = 0; 2460 2461 for (i = 0; i < host->num_slots; i++) { 2462 struct dw_mci_slot *slot = host->slot[i]; 2463 if (!slot) 2464 continue; 2465 ret = mmc_suspend_host(slot->mmc); 2466 if (ret < 0) { 2467 while (--i >= 0) { 2468 slot = host->slot[i]; 2469 if (slot) 2470 mmc_resume_host(host->slot[i]->mmc); 2471 } 2472 return ret; 2473 } 2474 } 2475 2476 if (host->vmmc) 2477 regulator_disable(host->vmmc); 2478 2479 return 0; 2480 } 2481 EXPORT_SYMBOL(dw_mci_suspend); 2482 2483 int dw_mci_resume(struct dw_mci *host) 2484 { 2485 int i, ret; 2486 2487 if (host->vmmc) { 2488 ret = regulator_enable(host->vmmc); 2489 if (ret) { 2490 dev_err(host->dev, 2491 "failed to enable regulator: %d\n", ret); 2492 return ret; 2493 } 2494 } 2495 2496 if (!mci_wait_reset(host->dev, host)) { 2497 ret = -ENODEV; 2498 return ret; 2499 } 2500 2501 if (host->use_dma && host->dma_ops->init) 2502 host->dma_ops->init(host); 2503 2504 /* Restore the old value at FIFOTH register */ 2505 mci_writel(host, FIFOTH, host->fifoth_val); 2506 2507 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2508 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2509 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2510 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2511 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 2512 2513 for (i = 0; i < host->num_slots; i++) { 2514 struct dw_mci_slot *slot = host->slot[i]; 2515 if (!slot) 2516 continue; 2517 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 2518 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 2519 dw_mci_setup_bus(slot, true); 2520 } 2521 2522 ret = mmc_resume_host(host->slot[i]->mmc); 2523 if (ret < 0) 2524 return ret; 2525 } 2526 return 0; 2527 } 2528 EXPORT_SYMBOL(dw_mci_resume); 2529 #endif /* CONFIG_PM_SLEEP */ 2530 2531 static int __init dw_mci_init(void) 2532 { 2533 pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 2534 return 0; 2535 } 2536 2537 static void __exit dw_mci_exit(void) 2538 { 2539 } 2540 2541 module_init(dw_mci_init); 2542 module_exit(dw_mci_exit); 2543 2544 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 2545 MODULE_AUTHOR("NXP Semiconductor VietNam"); 2546 MODULE_AUTHOR("Imagination Technologies Ltd"); 2547 MODULE_LICENSE("GPL v2"); 2548