/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

static unsigned int fmax = 515633;

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	u32 clk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			/* 8-bit clock divider: cclk = mclk / (2 * (clk + 1)) */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		if (host->hw_designer == AMBA_VENDOR_ST)
			clk |= MCI_FCEN; /* Bug fix in ST IP block */
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_WIDE_BUS;

	writel(clk, host->base + MMCICLOCK);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(sg_page(host->sg_ptr));
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_page(host->sg_ptr));

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
			mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc &&
		    regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		break;
	case MMC_POWER_UP:
#ifdef CONFIG_REGULATOR
		if (host->vcc)
			/* This implicitly enables the regulator */
			mmc_regulator_set_ocr(host->vcc, ios->vdd);
#endif
		/*
		 * The translate_vdd function is not used if you have
		 * an external regulator, or your design is really weird.
		 * Using it would mean sending in power control BOTH using
		 * a regulator AND the 4 MMCIPWR bits. If we don't have
		 * a regulator, we might have some other platform specific
		 * power control behind this translate function.
		 */
		if (!host->vcc && host->plat->translate_vdd)
			pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned int status;

	if (host->gpio_cd == -ENOSYS)
		status = host->plat->status(mmc_dev(host->mmc));
	else
		status = gpio_get_value(host->gpio_cd);

	return !status;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status = mmci_get_cd(host->mmc);

	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	DBG(host, "designer ID = 0x%02x\n", host->hw_designer);
	DBG(host, "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
					 "Provided ocr_mask/setpower will not be used "
					 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);
	host->oldstat = mmci_get_cd(host->mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		/* vcc may be NULL if no regulator was found at probe time */
		if (host->vcc && regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");