/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

static unsigned int fmax = 515633;

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(sg_page(host->sg_ptr));
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_page(host->sg_ptr));

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
		       mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irq(&host->lock);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irq(&host->lock);
}
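
/*
 * Bus clock selection (see mmci_set_ios below): with the divider
 * enabled, the card clock is cclk = mclk / (2 * (divider + 1)), so the
 * divider is computed as mclk / (2 * ios->clock) - 1 and clamped to the
 * 8-bit register field.  A worked example, with figures assumed purely
 * for illustration: mclk = 48 MHz and a requested 400 kHz give a
 * divider of 48000000 / (2 * 400000) - 1 = 59, i.e. cclk = 400 kHz
 * exactly.  Requests at or above mclk use the bypass path instead.
 */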
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;

	if (ios->clock) {
		if (ios->clock >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * ios->clock) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		clk |= MCI_CLK_ENABLE;
	}

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		pwr |= MCI_PWR_UP;
		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		pwr |= MCI_ROD;

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
};
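
/*
 * Card status polling: the platform's status() callback is sampled
 * once a second from this timer, and any change is reported to the
 * MMC core via mmc_detect_change().
 */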
static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status;

	status = host->plat->status(mmc_dev(host->mmc));
	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

static int mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->clk = clk_get(&dev->dev, "MCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
	}
	host->mmc = mmc;
	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
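
/*
 * Power management: on suspend the core is asked to quiesce the card
 * first and, if that succeeds, the controller's interrupt mask is
 * cleared; resume re-enables the interrupt mask before handing the
 * host back to the core.
 */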
#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");