1 /* 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 3 * 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 5 * Copyright (C) 2010 ST-Ericsson SA 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/module.h> 12 #include <linux/moduleparam.h> 13 #include <linux/init.h> 14 #include <linux/ioport.h> 15 #include <linux/device.h> 16 #include <linux/interrupt.h> 17 #include <linux/kernel.h> 18 #include <linux/delay.h> 19 #include <linux/err.h> 20 #include <linux/highmem.h> 21 #include <linux/log2.h> 22 #include <linux/mmc/host.h> 23 #include <linux/mmc/card.h> 24 #include <linux/amba/bus.h> 25 #include <linux/clk.h> 26 #include <linux/scatterlist.h> 27 #include <linux/gpio.h> 28 #include <linux/regulator/consumer.h> 29 #include <linux/dmaengine.h> 30 #include <linux/dma-mapping.h> 31 #include <linux/amba/mmci.h> 32 #include <linux/pm_runtime.h> 33 #include <linux/types.h> 34 35 #include <asm/div64.h> 36 #include <asm/io.h> 37 #include <asm/sizes.h> 38 39 #include "mmci.h" 40 41 #define DRIVER_NAME "mmci-pl18x" 42 43 static unsigned int fmax = 515633; 44 45 /** 46 * struct variant_data - MMCI variant-specific quirks 47 * @clkreg: default value for MCICLOCK register 48 * @clkreg_enable: enable value for MMCICLOCK register 49 * @datalength_bits: number of bits in the MMCIDATALENGTH register 50 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY 51 * is asserted (likewise for RX) 52 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY 53 * is asserted (likewise for RX) 54 * @sdio: variant supports SDIO 55 * @st_clkdiv: true if using a ST-specific clock divider algorithm 56 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register 57 */ 58 struct variant_data { 59 unsigned int clkreg; 60 unsigned int clkreg_enable; 61 unsigned int datalength_bits; 62 unsigned int fifosize; 63 unsigned int fifohalfsize; 64 bool sdio; 65 bool st_clkdiv; 66 bool blksz_datactrl16; 67 }; 68 69 static struct variant_data variant_arm = { 70 .fifosize = 16 * 4, 71 .fifohalfsize = 8 * 4, 72 .datalength_bits = 16, 73 }; 74 75 static struct variant_data variant_arm_extended_fifo = { 76 .fifosize = 128 * 4, 77 .fifohalfsize = 64 * 4, 78 .datalength_bits = 16, 79 }; 80 81 static struct variant_data variant_u300 = { 82 .fifosize = 16 * 4, 83 .fifohalfsize = 8 * 4, 84 .clkreg_enable = MCI_ST_U300_HWFCEN, 85 .datalength_bits = 16, 86 .sdio = true, 87 }; 88 89 static struct variant_data variant_ux500 = { 90 .fifosize = 30 * 4, 91 .fifohalfsize = 8 * 4, 92 .clkreg = MCI_CLK_ENABLE, 93 .clkreg_enable = MCI_ST_UX500_HWFCEN, 94 .datalength_bits = 24, 95 .sdio = true, 96 .st_clkdiv = true, 97 }; 98 99 static struct variant_data variant_ux500v2 = { 100 .fifosize = 30 * 4, 101 .fifohalfsize = 8 * 4, 102 .clkreg = MCI_CLK_ENABLE, 103 .clkreg_enable = MCI_ST_UX500_HWFCEN, 104 .datalength_bits = 24, 105 .sdio = true, 106 .st_clkdiv = true, 107 .blksz_datactrl16 = true, 108 }; 109 110 /* 111 * This must be called with host->lock held 112 */ 113 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) 114 { 115 struct variant_data *variant = host->variant; 116 u32 clk = variant->clkreg; 117 118 if (desired) { 119 if (desired >= host->mclk) { 120 clk = MCI_CLK_BYPASS; 121 if (variant->st_clkdiv) 122 clk |= MCI_ST_UX500_NEG_EDGE; 123 host->cclk = host->mclk; 124 } else if (variant->st_clkdiv) { 125 /* 126 * DB8500 TRM says f = mclk / (clkdiv + 2) 127 * => clkdiv = (mclk / f) - 2 128 * Round the divider up so we don't exceed the max 129 * frequency 130 */ 131 clk = DIV_ROUND_UP(host->mclk, desired) - 2; 132 if (clk >= 256) 133 clk = 255; 134 host->cclk = host->mclk / (clk + 2); 135 } else { 136 /* 137 * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) 138 * => clkdiv = mclk / (2 * f) - 1 139 */ 140 clk = host->mclk / (2 * desired) - 1; 141 if (clk >= 256) 142 clk = 255; 143 host->cclk = host->mclk / (2 * (clk + 1)); 144 } 145 146 clk |= variant->clkreg_enable; 147 clk |= MCI_CLK_ENABLE; 148 /* This hasn't proven to be worthwhile */ 149 /* clk |= MCI_CLK_PWRSAVE; */ 150 } 151 152 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) 153 clk |= MCI_4BIT_BUS; 154 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) 155 clk |= MCI_ST_8BIT_BUS; 156 157 writel(clk, host->base + MMCICLOCK); 158 } 159 160 static void 161 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) 162 { 163 writel(0, host->base + MMCICOMMAND); 164 165 BUG_ON(host->data); 166 167 host->mrq = NULL; 168 host->cmd = NULL; 169 170 /* 171 * Need to drop the host lock here; mmc_request_done may call 172 * back into the driver... 173 */ 174 spin_unlock(&host->lock); 175 pm_runtime_put(mmc_dev(host->mmc)); 176 mmc_request_done(host->mmc, mrq); 177 spin_lock(&host->lock); 178 } 179 180 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) 181 { 182 void __iomem *base = host->base; 183 184 if (host->singleirq) { 185 unsigned int mask0 = readl(base + MMCIMASK0); 186 187 mask0 &= ~MCI_IRQ1MASK; 188 mask0 |= mask; 189 190 writel(mask0, base + MMCIMASK0); 191 } 192 193 writel(mask, base + MMCIMASK1); 194 } 195 196 static void mmci_stop_data(struct mmci_host *host) 197 { 198 writel(0, host->base + MMCIDATACTRL); 199 mmci_set_mask1(host, 0); 200 host->data = NULL; 201 } 202 203 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) 204 { 205 unsigned int flags = SG_MITER_ATOMIC; 206 207 if (data->flags & MMC_DATA_READ) 208 flags |= SG_MITER_TO_SG; 209 else 210 flags |= SG_MITER_FROM_SG; 211 212 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 213 } 214 215 /* 216 * All the DMA operation mode stuff goes inside this ifdef. 217 * This assumes that you have a generic DMA device interface, 218 * no custom DMA interfaces are supported. 219 */ 220 #ifdef CONFIG_DMA_ENGINE 221 static void __devinit mmci_dma_setup(struct mmci_host *host) 222 { 223 struct mmci_platform_data *plat = host->plat; 224 const char *rxname, *txname; 225 dma_cap_mask_t mask; 226 227 if (!plat || !plat->dma_filter) { 228 dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); 229 return; 230 } 231 232 /* initialize pre request cookie */ 233 host->next_data.cookie = 1; 234 235 /* Try to acquire a generic DMA engine slave channel */ 236 dma_cap_zero(mask); 237 dma_cap_set(DMA_SLAVE, mask); 238 239 /* 240 * If only an RX channel is specified, the driver will 241 * attempt to use it bidirectionally, however if it is 242 * is specified but cannot be located, DMA will be disabled. 243 */ 244 if (plat->dma_rx_param) { 245 host->dma_rx_channel = dma_request_channel(mask, 246 plat->dma_filter, 247 plat->dma_rx_param); 248 /* E.g if no DMA hardware is present */ 249 if (!host->dma_rx_channel) 250 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); 251 } 252 253 if (plat->dma_tx_param) { 254 host->dma_tx_channel = dma_request_channel(mask, 255 plat->dma_filter, 256 plat->dma_tx_param); 257 if (!host->dma_tx_channel) 258 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); 259 } else { 260 host->dma_tx_channel = host->dma_rx_channel; 261 } 262 263 if (host->dma_rx_channel) 264 rxname = dma_chan_name(host->dma_rx_channel); 265 else 266 rxname = "none"; 267 268 if (host->dma_tx_channel) 269 txname = dma_chan_name(host->dma_tx_channel); 270 else 271 txname = "none"; 272 273 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", 274 rxname, txname); 275 276 /* 277 * Limit the maximum segment size in any SG entry according to 278 * the parameters of the DMA engine device. 279 */ 280 if (host->dma_tx_channel) { 281 struct device *dev = host->dma_tx_channel->device->dev; 282 unsigned int max_seg_size = dma_get_max_seg_size(dev); 283 284 if (max_seg_size < host->mmc->max_seg_size) 285 host->mmc->max_seg_size = max_seg_size; 286 } 287 if (host->dma_rx_channel) { 288 struct device *dev = host->dma_rx_channel->device->dev; 289 unsigned int max_seg_size = dma_get_max_seg_size(dev); 290 291 if (max_seg_size < host->mmc->max_seg_size) 292 host->mmc->max_seg_size = max_seg_size; 293 } 294 } 295 296 /* 297 * This is used in __devinit or __devexit so inline it 298 * so it can be discarded. 299 */ 300 static inline void mmci_dma_release(struct mmci_host *host) 301 { 302 struct mmci_platform_data *plat = host->plat; 303 304 if (host->dma_rx_channel) 305 dma_release_channel(host->dma_rx_channel); 306 if (host->dma_tx_channel && plat->dma_tx_param) 307 dma_release_channel(host->dma_tx_channel); 308 host->dma_rx_channel = host->dma_tx_channel = NULL; 309 } 310 311 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 312 { 313 struct dma_chan *chan = host->dma_current; 314 enum dma_data_direction dir; 315 u32 status; 316 int i; 317 318 /* Wait up to 1ms for the DMA to complete */ 319 for (i = 0; ; i++) { 320 status = readl(host->base + MMCISTATUS); 321 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 322 break; 323 udelay(10); 324 } 325 326 /* 327 * Check to see whether we still have some data left in the FIFO - 328 * this catches DMA controllers which are unable to monitor the 329 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- 330 * contiguous buffers. On TX, we'll get a FIFO underrun error. 331 */ 332 if (status & MCI_RXDATAAVLBLMASK) { 333 dmaengine_terminate_all(chan); 334 if (!data->error) 335 data->error = -EIO; 336 } 337 338 if (data->flags & MMC_DATA_WRITE) { 339 dir = DMA_TO_DEVICE; 340 } else { 341 dir = DMA_FROM_DEVICE; 342 } 343 344 if (!data->host_cookie) 345 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 346 347 /* 348 * Use of DMA with scatter-gather is impossible. 349 * Give up with DMA and switch back to PIO mode. 350 */ 351 if (status & MCI_RXDATAAVLBLMASK) { 352 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); 353 mmci_dma_release(host); 354 } 355 } 356 357 static void mmci_dma_data_error(struct mmci_host *host) 358 { 359 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 360 dmaengine_terminate_all(host->dma_current); 361 } 362 363 static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, 364 struct mmci_host_next *next) 365 { 366 struct variant_data *variant = host->variant; 367 struct dma_slave_config conf = { 368 .src_addr = host->phybase + MMCIFIFO, 369 .dst_addr = host->phybase + MMCIFIFO, 370 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 371 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 372 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 373 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 374 .device_fc = false, 375 }; 376 struct dma_chan *chan; 377 struct dma_device *device; 378 struct dma_async_tx_descriptor *desc; 379 enum dma_data_direction buffer_dirn; 380 int nr_sg; 381 382 /* Check if next job is already prepared */ 383 if (data->host_cookie && !next && 384 host->dma_current && host->dma_desc_current) 385 return 0; 386 387 if (!next) { 388 host->dma_current = NULL; 389 host->dma_desc_current = NULL; 390 } 391 392 if (data->flags & MMC_DATA_READ) { 393 conf.direction = DMA_DEV_TO_MEM; 394 buffer_dirn = DMA_FROM_DEVICE; 395 chan = host->dma_rx_channel; 396 } else { 397 conf.direction = DMA_MEM_TO_DEV; 398 buffer_dirn = DMA_TO_DEVICE; 399 chan = host->dma_tx_channel; 400 } 401 402 /* If there's no DMA channel, fall back to PIO */ 403 if (!chan) 404 return -EINVAL; 405 406 /* If less than or equal to the fifo size, don't bother with DMA */ 407 if (data->blksz * data->blocks <= variant->fifosize) 408 return -EINVAL; 409 410 device = chan->device; 411 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 412 if (nr_sg == 0) 413 return -EINVAL; 414 415 dmaengine_slave_config(chan, &conf); 416 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, 417 conf.direction, DMA_CTRL_ACK); 418 if (!desc) 419 goto unmap_exit; 420 421 if (next) { 422 next->dma_chan = chan; 423 next->dma_desc = desc; 424 } else { 425 host->dma_current = chan; 426 host->dma_desc_current = desc; 427 } 428 429 return 0; 430 431 unmap_exit: 432 if (!next) 433 dmaengine_terminate_all(chan); 434 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 435 return -ENOMEM; 436 } 437 438 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 439 { 440 int ret; 441 struct mmc_data *data = host->data; 442 443 ret = mmci_dma_prep_data(host, host->data, NULL); 444 if (ret) 445 return ret; 446 447 /* Okay, go for it. */ 448 dev_vdbg(mmc_dev(host->mmc), 449 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 450 data->sg_len, data->blksz, data->blocks, data->flags); 451 dmaengine_submit(host->dma_desc_current); 452 dma_async_issue_pending(host->dma_current); 453 454 datactrl |= MCI_DPSM_DMAENABLE; 455 456 /* Trigger the DMA transfer */ 457 writel(datactrl, host->base + MMCIDATACTRL); 458 459 /* 460 * Let the MMCI say when the data is ended and it's time 461 * to fire next DMA request. When that happens, MMCI will 462 * call mmci_data_end() 463 */ 464 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 465 host->base + MMCIMASK0); 466 return 0; 467 } 468 469 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 470 { 471 struct mmci_host_next *next = &host->next_data; 472 473 if (data->host_cookie && data->host_cookie != next->cookie) { 474 pr_warning("[%s] invalid cookie: data->host_cookie %d" 475 " host->next_data.cookie %d\n", 476 __func__, data->host_cookie, host->next_data.cookie); 477 data->host_cookie = 0; 478 } 479 480 if (!data->host_cookie) 481 return; 482 483 host->dma_desc_current = next->dma_desc; 484 host->dma_current = next->dma_chan; 485 486 next->dma_desc = NULL; 487 next->dma_chan = NULL; 488 } 489 490 static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, 491 bool is_first_req) 492 { 493 struct mmci_host *host = mmc_priv(mmc); 494 struct mmc_data *data = mrq->data; 495 struct mmci_host_next *nd = &host->next_data; 496 497 if (!data) 498 return; 499 500 if (data->host_cookie) { 501 data->host_cookie = 0; 502 return; 503 } 504 505 /* if config for dma */ 506 if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || 507 ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) { 508 if (mmci_dma_prep_data(host, data, nd)) 509 data->host_cookie = 0; 510 else 511 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; 512 } 513 } 514 515 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, 516 int err) 517 { 518 struct mmci_host *host = mmc_priv(mmc); 519 struct mmc_data *data = mrq->data; 520 struct dma_chan *chan; 521 enum dma_data_direction dir; 522 523 if (!data) 524 return; 525 526 if (data->flags & MMC_DATA_READ) { 527 dir = DMA_FROM_DEVICE; 528 chan = host->dma_rx_channel; 529 } else { 530 dir = DMA_TO_DEVICE; 531 chan = host->dma_tx_channel; 532 } 533 534 535 /* if config for dma */ 536 if (chan) { 537 if (err) 538 dmaengine_terminate_all(chan); 539 if (data->host_cookie) 540 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 541 data->sg_len, dir); 542 mrq->data->host_cookie = 0; 543 } 544 } 545 546 #else 547 /* Blank functions if the DMA engine is not available */ 548 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 549 { 550 } 551 static inline void mmci_dma_setup(struct mmci_host *host) 552 { 553 } 554 555 static inline void mmci_dma_release(struct mmci_host *host) 556 { 557 } 558 559 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 560 { 561 } 562 563 static inline void mmci_dma_data_error(struct mmci_host *host) 564 { 565 } 566 567 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 568 { 569 return -ENOSYS; 570 } 571 572 #define mmci_pre_request NULL 573 #define mmci_post_request NULL 574 575 #endif 576 577 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 578 { 579 struct variant_data *variant = host->variant; 580 unsigned int datactrl, timeout, irqmask; 581 unsigned long long clks; 582 void __iomem *base; 583 int blksz_bits; 584 585 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 586 data->blksz, data->blocks, data->flags); 587 588 host->data = data; 589 host->size = data->blksz * data->blocks; 590 data->bytes_xfered = 0; 591 592 clks = (unsigned long long)data->timeout_ns * host->cclk; 593 do_div(clks, 1000000000UL); 594 595 timeout = data->timeout_clks + (unsigned int)clks; 596 597 base = host->base; 598 writel(timeout, base + MMCIDATATIMER); 599 writel(host->size, base + MMCIDATALENGTH); 600 601 blksz_bits = ffs(data->blksz) - 1; 602 BUG_ON(1 << blksz_bits != data->blksz); 603 604 if (variant->blksz_datactrl16) 605 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); 606 else 607 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 608 609 if (data->flags & MMC_DATA_READ) 610 datactrl |= MCI_DPSM_DIRECTION; 611 612 /* 613 * Attempt to use DMA operation mode, if this 614 * should fail, fall back to PIO mode 615 */ 616 if (!mmci_dma_start_data(host, datactrl)) 617 return; 618 619 /* IRQ mode, map the SG list for CPU reading/writing */ 620 mmci_init_sg(host, data); 621 622 if (data->flags & MMC_DATA_READ) { 623 irqmask = MCI_RXFIFOHALFFULLMASK; 624 625 /* 626 * If we have less than the fifo 'half-full' threshold to 627 * transfer, trigger a PIO interrupt as soon as any data 628 * is available. 629 */ 630 if (host->size < variant->fifohalfsize) 631 irqmask |= MCI_RXDATAAVLBLMASK; 632 } else { 633 /* 634 * We don't actually need to include "FIFO empty" here 635 * since its implicit in "FIFO half empty". 636 */ 637 irqmask = MCI_TXFIFOHALFEMPTYMASK; 638 } 639 640 /* The ST Micro variants has a special bit to enable SDIO */ 641 if (variant->sdio && host->mmc->card) 642 if (mmc_card_sdio(host->mmc->card)) 643 datactrl |= MCI_ST_DPSM_SDIOEN; 644 645 writel(datactrl, base + MMCIDATACTRL); 646 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 647 mmci_set_mask1(host, irqmask); 648 } 649 650 static void 651 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 652 { 653 void __iomem *base = host->base; 654 655 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 656 cmd->opcode, cmd->arg, cmd->flags); 657 658 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 659 writel(0, base + MMCICOMMAND); 660 udelay(1); 661 } 662 663 c |= cmd->opcode | MCI_CPSM_ENABLE; 664 if (cmd->flags & MMC_RSP_PRESENT) { 665 if (cmd->flags & MMC_RSP_136) 666 c |= MCI_CPSM_LONGRSP; 667 c |= MCI_CPSM_RESPONSE; 668 } 669 if (/*interrupt*/0) 670 c |= MCI_CPSM_INTERRUPT; 671 672 host->cmd = cmd; 673 674 writel(cmd->arg, base + MMCIARGUMENT); 675 writel(c, base + MMCICOMMAND); 676 } 677 678 static void 679 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 680 unsigned int status) 681 { 682 /* First check for errors */ 683 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 684 MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 685 u32 remain, success; 686 687 /* Terminate the DMA transfer */ 688 if (dma_inprogress(host)) 689 mmci_dma_data_error(host); 690 691 /* 692 * Calculate how far we are into the transfer. Note that 693 * the data counter gives the number of bytes transferred 694 * on the MMC bus, not on the host side. On reads, this 695 * can be as much as a FIFO-worth of data ahead. This 696 * matters for FIFO overruns only. 697 */ 698 remain = readl(host->base + MMCIDATACNT); 699 success = data->blksz * data->blocks - remain; 700 701 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 702 status, success); 703 if (status & MCI_DATACRCFAIL) { 704 /* Last block was not successful */ 705 success -= 1; 706 data->error = -EILSEQ; 707 } else if (status & MCI_DATATIMEOUT) { 708 data->error = -ETIMEDOUT; 709 } else if (status & MCI_STARTBITERR) { 710 data->error = -ECOMM; 711 } else if (status & MCI_TXUNDERRUN) { 712 data->error = -EIO; 713 } else if (status & MCI_RXOVERRUN) { 714 if (success > host->variant->fifosize) 715 success -= host->variant->fifosize; 716 else 717 success = 0; 718 data->error = -EIO; 719 } 720 data->bytes_xfered = round_down(success, data->blksz); 721 } 722 723 if (status & MCI_DATABLOCKEND) 724 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 725 726 if (status & MCI_DATAEND || data->error) { 727 if (dma_inprogress(host)) 728 mmci_dma_unmap(host, data); 729 mmci_stop_data(host); 730 731 if (!data->error) 732 /* The error clause is handled above, success! */ 733 data->bytes_xfered = data->blksz * data->blocks; 734 735 if (!data->stop) { 736 mmci_request_end(host, data->mrq); 737 } else { 738 mmci_start_command(host, data->stop, 0); 739 } 740 } 741 } 742 743 static void 744 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 745 unsigned int status) 746 { 747 void __iomem *base = host->base; 748 749 host->cmd = NULL; 750 751 if (status & MCI_CMDTIMEOUT) { 752 cmd->error = -ETIMEDOUT; 753 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 754 cmd->error = -EILSEQ; 755 } else { 756 cmd->resp[0] = readl(base + MMCIRESPONSE0); 757 cmd->resp[1] = readl(base + MMCIRESPONSE1); 758 cmd->resp[2] = readl(base + MMCIRESPONSE2); 759 cmd->resp[3] = readl(base + MMCIRESPONSE3); 760 } 761 762 if (!cmd->data || cmd->error) { 763 if (host->data) { 764 /* Terminate the DMA transfer */ 765 if (dma_inprogress(host)) 766 mmci_dma_data_error(host); 767 mmci_stop_data(host); 768 } 769 mmci_request_end(host, cmd->mrq); 770 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 771 mmci_start_data(host, cmd->data); 772 } 773 } 774 775 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 776 { 777 void __iomem *base = host->base; 778 char *ptr = buffer; 779 u32 status; 780 int host_remain = host->size; 781 782 do { 783 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 784 785 if (count > remain) 786 count = remain; 787 788 if (count <= 0) 789 break; 790 791 readsl(base + MMCIFIFO, ptr, count >> 2); 792 793 ptr += count; 794 remain -= count; 795 host_remain -= count; 796 797 if (remain == 0) 798 break; 799 800 status = readl(base + MMCISTATUS); 801 } while (status & MCI_RXDATAAVLBL); 802 803 return ptr - buffer; 804 } 805 806 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 807 { 808 struct variant_data *variant = host->variant; 809 void __iomem *base = host->base; 810 char *ptr = buffer; 811 812 do { 813 unsigned int count, maxcnt; 814 815 maxcnt = status & MCI_TXFIFOEMPTY ? 816 variant->fifosize : variant->fifohalfsize; 817 count = min(remain, maxcnt); 818 819 /* 820 * The ST Micro variant for SDIO transfer sizes 821 * less then 8 bytes should have clock H/W flow 822 * control disabled. 823 */ 824 if (variant->sdio && 825 mmc_card_sdio(host->mmc->card)) { 826 if (count < 8) 827 writel(readl(host->base + MMCICLOCK) & 828 ~variant->clkreg_enable, 829 host->base + MMCICLOCK); 830 else 831 writel(readl(host->base + MMCICLOCK) | 832 variant->clkreg_enable, 833 host->base + MMCICLOCK); 834 } 835 836 /* 837 * SDIO especially may want to send something that is 838 * not divisible by 4 (as opposed to card sectors 839 * etc), and the FIFO only accept full 32-bit writes. 840 * So compensate by adding +3 on the count, a single 841 * byte become a 32bit write, 7 bytes will be two 842 * 32bit writes etc. 843 */ 844 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); 845 846 ptr += count; 847 remain -= count; 848 849 if (remain == 0) 850 break; 851 852 status = readl(base + MMCISTATUS); 853 } while (status & MCI_TXFIFOHALFEMPTY); 854 855 return ptr - buffer; 856 } 857 858 /* 859 * PIO data transfer IRQ handler. 860 */ 861 static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 862 { 863 struct mmci_host *host = dev_id; 864 struct sg_mapping_iter *sg_miter = &host->sg_miter; 865 struct variant_data *variant = host->variant; 866 void __iomem *base = host->base; 867 unsigned long flags; 868 u32 status; 869 870 status = readl(base + MMCISTATUS); 871 872 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 873 874 local_irq_save(flags); 875 876 do { 877 unsigned int remain, len; 878 char *buffer; 879 880 /* 881 * For write, we only need to test the half-empty flag 882 * here - if the FIFO is completely empty, then by 883 * definition it is more than half empty. 884 * 885 * For read, check for data available. 886 */ 887 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 888 break; 889 890 if (!sg_miter_next(sg_miter)) 891 break; 892 893 buffer = sg_miter->addr; 894 remain = sg_miter->length; 895 896 len = 0; 897 if (status & MCI_RXACTIVE) 898 len = mmci_pio_read(host, buffer, remain); 899 if (status & MCI_TXACTIVE) 900 len = mmci_pio_write(host, buffer, remain, status); 901 902 sg_miter->consumed = len; 903 904 host->size -= len; 905 remain -= len; 906 907 if (remain) 908 break; 909 910 status = readl(base + MMCISTATUS); 911 } while (1); 912 913 sg_miter_stop(sg_miter); 914 915 local_irq_restore(flags); 916 917 /* 918 * If we have less than the fifo 'half-full' threshold to transfer, 919 * trigger a PIO interrupt as soon as any data is available. 920 */ 921 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 922 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 923 924 /* 925 * If we run out of data, disable the data IRQs; this 926 * prevents a race where the FIFO becomes empty before 927 * the chip itself has disabled the data path, and 928 * stops us racing with our data end IRQ. 929 */ 930 if (host->size == 0) { 931 mmci_set_mask1(host, 0); 932 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 933 } 934 935 return IRQ_HANDLED; 936 } 937 938 /* 939 * Handle completion of command and data transfers. 940 */ 941 static irqreturn_t mmci_irq(int irq, void *dev_id) 942 { 943 struct mmci_host *host = dev_id; 944 u32 status; 945 int ret = 0; 946 947 spin_lock(&host->lock); 948 949 do { 950 struct mmc_command *cmd; 951 struct mmc_data *data; 952 953 status = readl(host->base + MMCISTATUS); 954 955 if (host->singleirq) { 956 if (status & readl(host->base + MMCIMASK1)) 957 mmci_pio_irq(irq, dev_id); 958 959 status &= ~MCI_IRQ1MASK; 960 } 961 962 status &= readl(host->base + MMCIMASK0); 963 writel(status, host->base + MMCICLEAR); 964 965 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 966 967 data = host->data; 968 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 969 MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| 970 MCI_DATABLOCKEND) && data) 971 mmci_data_irq(host, data, status); 972 973 cmd = host->cmd; 974 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) 975 mmci_cmd_irq(host, cmd, status); 976 977 ret = 1; 978 } while (status); 979 980 spin_unlock(&host->lock); 981 982 return IRQ_RETVAL(ret); 983 } 984 985 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 986 { 987 struct mmci_host *host = mmc_priv(mmc); 988 unsigned long flags; 989 990 WARN_ON(host->mrq != NULL); 991 992 if (mrq->data && !is_power_of_2(mrq->data->blksz)) { 993 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", 994 mrq->data->blksz); 995 mrq->cmd->error = -EINVAL; 996 mmc_request_done(mmc, mrq); 997 return; 998 } 999 1000 pm_runtime_get_sync(mmc_dev(mmc)); 1001 1002 spin_lock_irqsave(&host->lock, flags); 1003 1004 host->mrq = mrq; 1005 1006 if (mrq->data) 1007 mmci_get_next_data(host, mrq->data); 1008 1009 if (mrq->data && mrq->data->flags & MMC_DATA_READ) 1010 mmci_start_data(host, mrq->data); 1011 1012 mmci_start_command(host, mrq->cmd, 0); 1013 1014 spin_unlock_irqrestore(&host->lock, flags); 1015 } 1016 1017 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1018 { 1019 struct mmci_host *host = mmc_priv(mmc); 1020 u32 pwr = 0; 1021 unsigned long flags; 1022 int ret; 1023 1024 switch (ios->power_mode) { 1025 case MMC_POWER_OFF: 1026 if (host->vcc) 1027 ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); 1028 break; 1029 case MMC_POWER_UP: 1030 if (host->vcc) { 1031 ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); 1032 if (ret) { 1033 dev_err(mmc_dev(mmc), "unable to set OCR\n"); 1034 /* 1035 * The .set_ios() function in the mmc_host_ops 1036 * struct return void, and failing to set the 1037 * power should be rare so we print an error 1038 * and return here. 1039 */ 1040 return; 1041 } 1042 } 1043 if (host->plat->vdd_handler) 1044 pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, 1045 ios->power_mode); 1046 /* The ST version does not have this, fall through to POWER_ON */ 1047 if (host->hw_designer != AMBA_VENDOR_ST) { 1048 pwr |= MCI_PWR_UP; 1049 break; 1050 } 1051 case MMC_POWER_ON: 1052 pwr |= MCI_PWR_ON; 1053 break; 1054 } 1055 1056 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 1057 if (host->hw_designer != AMBA_VENDOR_ST) 1058 pwr |= MCI_ROD; 1059 else { 1060 /* 1061 * The ST Micro variant use the ROD bit for something 1062 * else and only has OD (Open Drain). 1063 */ 1064 pwr |= MCI_OD; 1065 } 1066 } 1067 1068 spin_lock_irqsave(&host->lock, flags); 1069 1070 mmci_set_clkreg(host, ios->clock); 1071 1072 if (host->pwr != pwr) { 1073 host->pwr = pwr; 1074 writel(pwr, host->base + MMCIPOWER); 1075 } 1076 1077 spin_unlock_irqrestore(&host->lock, flags); 1078 } 1079 1080 static int mmci_get_ro(struct mmc_host *mmc) 1081 { 1082 struct mmci_host *host = mmc_priv(mmc); 1083 1084 if (host->gpio_wp == -ENOSYS) 1085 return -ENOSYS; 1086 1087 return gpio_get_value_cansleep(host->gpio_wp); 1088 } 1089 1090 static int mmci_get_cd(struct mmc_host *mmc) 1091 { 1092 struct mmci_host *host = mmc_priv(mmc); 1093 struct mmci_platform_data *plat = host->plat; 1094 unsigned int status; 1095 1096 if (host->gpio_cd == -ENOSYS) { 1097 if (!plat->status) 1098 return 1; /* Assume always present */ 1099 1100 status = plat->status(mmc_dev(host->mmc)); 1101 } else 1102 status = !!gpio_get_value_cansleep(host->gpio_cd) 1103 ^ plat->cd_invert; 1104 1105 /* 1106 * Use positive logic throughout - status is zero for no card, 1107 * non-zero for card inserted. 1108 */ 1109 return status; 1110 } 1111 1112 static irqreturn_t mmci_cd_irq(int irq, void *dev_id) 1113 { 1114 struct mmci_host *host = dev_id; 1115 1116 mmc_detect_change(host->mmc, msecs_to_jiffies(500)); 1117 1118 return IRQ_HANDLED; 1119 } 1120 1121 static const struct mmc_host_ops mmci_ops = { 1122 .request = mmci_request, 1123 .pre_req = mmci_pre_request, 1124 .post_req = mmci_post_request, 1125 .set_ios = mmci_set_ios, 1126 .get_ro = mmci_get_ro, 1127 .get_cd = mmci_get_cd, 1128 }; 1129 1130 static int __devinit mmci_probe(struct amba_device *dev, 1131 const struct amba_id *id) 1132 { 1133 struct mmci_platform_data *plat = dev->dev.platform_data; 1134 struct variant_data *variant = id->data; 1135 struct mmci_host *host; 1136 struct mmc_host *mmc; 1137 int ret; 1138 1139 /* must have platform data */ 1140 if (!plat) { 1141 ret = -EINVAL; 1142 goto out; 1143 } 1144 1145 ret = amba_request_regions(dev, DRIVER_NAME); 1146 if (ret) 1147 goto out; 1148 1149 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); 1150 if (!mmc) { 1151 ret = -ENOMEM; 1152 goto rel_regions; 1153 } 1154 1155 host = mmc_priv(mmc); 1156 host->mmc = mmc; 1157 1158 host->gpio_wp = -ENOSYS; 1159 host->gpio_cd = -ENOSYS; 1160 host->gpio_cd_irq = -1; 1161 1162 host->hw_designer = amba_manf(dev); 1163 host->hw_revision = amba_rev(dev); 1164 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); 1165 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); 1166 1167 host->clk = clk_get(&dev->dev, NULL); 1168 if (IS_ERR(host->clk)) { 1169 ret = PTR_ERR(host->clk); 1170 host->clk = NULL; 1171 goto host_free; 1172 } 1173 1174 ret = clk_prepare(host->clk); 1175 if (ret) 1176 goto clk_free; 1177 1178 ret = clk_enable(host->clk); 1179 if (ret) 1180 goto clk_unprep; 1181 1182 host->plat = plat; 1183 host->variant = variant; 1184 host->mclk = clk_get_rate(host->clk); 1185 /* 1186 * According to the spec, mclk is max 100 MHz, 1187 * so we try to adjust the clock down to this, 1188 * (if possible). 1189 */ 1190 if (host->mclk > 100000000) { 1191 ret = clk_set_rate(host->clk, 100000000); 1192 if (ret < 0) 1193 goto clk_disable; 1194 host->mclk = clk_get_rate(host->clk); 1195 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1196 host->mclk); 1197 } 1198 host->phybase = dev->res.start; 1199 host->base = ioremap(dev->res.start, resource_size(&dev->res)); 1200 if (!host->base) { 1201 ret = -ENOMEM; 1202 goto clk_disable; 1203 } 1204 1205 mmc->ops = &mmci_ops; 1206 /* 1207 * The ARM and ST versions of the block have slightly different 1208 * clock divider equations which means that the minimum divider 1209 * differs too. 1210 */ 1211 if (variant->st_clkdiv) 1212 mmc->f_min = DIV_ROUND_UP(host->mclk, 257); 1213 else 1214 mmc->f_min = DIV_ROUND_UP(host->mclk, 512); 1215 /* 1216 * If the platform data supplies a maximum operating 1217 * frequency, this takes precedence. Else, we fall back 1218 * to using the module parameter, which has a (low) 1219 * default value in case it is not specified. Either 1220 * value must not exceed the clock rate into the block, 1221 * of course. 1222 */ 1223 if (plat->f_max) 1224 mmc->f_max = min(host->mclk, plat->f_max); 1225 else 1226 mmc->f_max = min(host->mclk, fmax); 1227 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 1228 1229 #ifdef CONFIG_REGULATOR 1230 /* If we're using the regulator framework, try to fetch a regulator */ 1231 host->vcc = regulator_get(&dev->dev, "vmmc"); 1232 if (IS_ERR(host->vcc)) 1233 host->vcc = NULL; 1234 else { 1235 int mask = mmc_regulator_get_ocrmask(host->vcc); 1236 1237 if (mask < 0) 1238 dev_err(&dev->dev, "error getting OCR mask (%d)\n", 1239 mask); 1240 else { 1241 host->mmc->ocr_avail = (u32) mask; 1242 if (plat->ocr_mask) 1243 dev_warn(&dev->dev, 1244 "Provided ocr_mask/setpower will not be used " 1245 "(using regulator instead)\n"); 1246 } 1247 } 1248 #endif 1249 /* Fall back to platform data if no regulator is found */ 1250 if (host->vcc == NULL) 1251 mmc->ocr_avail = plat->ocr_mask; 1252 mmc->caps = plat->capabilities; 1253 mmc->caps2 = plat->capabilities2; 1254 1255 /* 1256 * We can do SGIO 1257 */ 1258 mmc->max_segs = NR_SG; 1259 1260 /* 1261 * Since only a certain number of bits are valid in the data length 1262 * register, we must ensure that we don't exceed 2^num-1 bytes in a 1263 * single request. 1264 */ 1265 mmc->max_req_size = (1 << variant->datalength_bits) - 1; 1266 1267 /* 1268 * Set the maximum segment size. Since we aren't doing DMA 1269 * (yet) we are only limited by the data length register. 1270 */ 1271 mmc->max_seg_size = mmc->max_req_size; 1272 1273 /* 1274 * Block size can be up to 2048 bytes, but must be a power of two. 1275 */ 1276 mmc->max_blk_size = 2048; 1277 1278 /* 1279 * No limit on the number of blocks transferred. 1280 */ 1281 mmc->max_blk_count = mmc->max_req_size; 1282 1283 spin_lock_init(&host->lock); 1284 1285 writel(0, host->base + MMCIMASK0); 1286 writel(0, host->base + MMCIMASK1); 1287 writel(0xfff, host->base + MMCICLEAR); 1288 1289 if (gpio_is_valid(plat->gpio_cd)) { 1290 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 1291 if (ret == 0) 1292 ret = gpio_direction_input(plat->gpio_cd); 1293 if (ret == 0) 1294 host->gpio_cd = plat->gpio_cd; 1295 else if (ret != -ENOSYS) 1296 goto err_gpio_cd; 1297 1298 /* 1299 * A gpio pin that will detect cards when inserted and removed 1300 * will most likely want to trigger on the edges if it is 1301 * 0 when ejected and 1 when inserted (or mutatis mutandis 1302 * for the inverted case) so we request triggers on both 1303 * edges. 1304 */ 1305 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), 1306 mmci_cd_irq, 1307 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 1308 DRIVER_NAME " (cd)", host); 1309 if (ret >= 0) 1310 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1311 } 1312 if (gpio_is_valid(plat->gpio_wp)) { 1313 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); 1314 if (ret == 0) 1315 ret = gpio_direction_input(plat->gpio_wp); 1316 if (ret == 0) 1317 host->gpio_wp = plat->gpio_wp; 1318 else if (ret != -ENOSYS) 1319 goto err_gpio_wp; 1320 } 1321 1322 if ((host->plat->status || host->gpio_cd != -ENOSYS) 1323 && host->gpio_cd_irq < 0) 1324 mmc->caps |= MMC_CAP_NEEDS_POLL; 1325 1326 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 1327 if (ret) 1328 goto unmap; 1329 1330 if (dev->irq[1] == NO_IRQ) 1331 host->singleirq = true; 1332 else { 1333 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, 1334 DRIVER_NAME " (pio)", host); 1335 if (ret) 1336 goto irq0_free; 1337 } 1338 1339 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1340 1341 amba_set_drvdata(dev, mmc); 1342 1343 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", 1344 mmc_hostname(mmc), amba_part(dev), amba_manf(dev), 1345 amba_rev(dev), (unsigned long long)dev->res.start, 1346 dev->irq[0], dev->irq[1]); 1347 1348 mmci_dma_setup(host); 1349 1350 pm_runtime_put(&dev->dev); 1351 1352 mmc_add_host(mmc); 1353 1354 return 0; 1355 1356 irq0_free: 1357 free_irq(dev->irq[0], host); 1358 unmap: 1359 if (host->gpio_wp != -ENOSYS) 1360 gpio_free(host->gpio_wp); 1361 err_gpio_wp: 1362 if (host->gpio_cd_irq >= 0) 1363 free_irq(host->gpio_cd_irq, host); 1364 if (host->gpio_cd != -ENOSYS) 1365 gpio_free(host->gpio_cd); 1366 err_gpio_cd: 1367 iounmap(host->base); 1368 clk_disable: 1369 clk_disable(host->clk); 1370 clk_unprep: 1371 clk_unprepare(host->clk); 1372 clk_free: 1373 clk_put(host->clk); 1374 host_free: 1375 mmc_free_host(mmc); 1376 rel_regions: 1377 amba_release_regions(dev); 1378 out: 1379 return ret; 1380 } 1381 1382 static int __devexit mmci_remove(struct amba_device *dev) 1383 { 1384 struct mmc_host *mmc = amba_get_drvdata(dev); 1385 1386 amba_set_drvdata(dev, NULL); 1387 1388 if (mmc) { 1389 struct mmci_host *host = mmc_priv(mmc); 1390 1391 /* 1392 * Undo pm_runtime_put() in probe. We use the _sync 1393 * version here so that we can access the primecell. 1394 */ 1395 pm_runtime_get_sync(&dev->dev); 1396 1397 mmc_remove_host(mmc); 1398 1399 writel(0, host->base + MMCIMASK0); 1400 writel(0, host->base + MMCIMASK1); 1401 1402 writel(0, host->base + MMCICOMMAND); 1403 writel(0, host->base + MMCIDATACTRL); 1404 1405 mmci_dma_release(host); 1406 free_irq(dev->irq[0], host); 1407 if (!host->singleirq) 1408 free_irq(dev->irq[1], host); 1409 1410 if (host->gpio_wp != -ENOSYS) 1411 gpio_free(host->gpio_wp); 1412 if (host->gpio_cd_irq >= 0) 1413 free_irq(host->gpio_cd_irq, host); 1414 if (host->gpio_cd != -ENOSYS) 1415 gpio_free(host->gpio_cd); 1416 1417 iounmap(host->base); 1418 clk_disable(host->clk); 1419 clk_unprepare(host->clk); 1420 clk_put(host->clk); 1421 1422 if (host->vcc) 1423 mmc_regulator_set_ocr(mmc, host->vcc, 0); 1424 regulator_put(host->vcc); 1425 1426 mmc_free_host(mmc); 1427 1428 amba_release_regions(dev); 1429 } 1430 1431 return 0; 1432 } 1433 1434 #ifdef CONFIG_PM 1435 static int mmci_suspend(struct amba_device *dev, pm_message_t state) 1436 { 1437 struct mmc_host *mmc = amba_get_drvdata(dev); 1438 int ret = 0; 1439 1440 if (mmc) { 1441 struct mmci_host *host = mmc_priv(mmc); 1442 1443 ret = mmc_suspend_host(mmc); 1444 if (ret == 0) 1445 writel(0, host->base + MMCIMASK0); 1446 } 1447 1448 return ret; 1449 } 1450 1451 static int mmci_resume(struct amba_device *dev) 1452 { 1453 struct mmc_host *mmc = amba_get_drvdata(dev); 1454 int ret = 0; 1455 1456 if (mmc) { 1457 struct mmci_host *host = mmc_priv(mmc); 1458 1459 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1460 1461 ret = mmc_resume_host(mmc); 1462 } 1463 1464 return ret; 1465 } 1466 #else 1467 #define mmci_suspend NULL 1468 #define mmci_resume NULL 1469 #endif 1470 1471 static struct amba_id mmci_ids[] = { 1472 { 1473 .id = 0x00041180, 1474 .mask = 0xff0fffff, 1475 .data = &variant_arm, 1476 }, 1477 { 1478 .id = 0x01041180, 1479 .mask = 0xff0fffff, 1480 .data = &variant_arm_extended_fifo, 1481 }, 1482 { 1483 .id = 0x00041181, 1484 .mask = 0x000fffff, 1485 .data = &variant_arm, 1486 }, 1487 /* ST Micro variants */ 1488 { 1489 .id = 0x00180180, 1490 .mask = 0x00ffffff, 1491 .data = &variant_u300, 1492 }, 1493 { 1494 .id = 0x00280180, 1495 .mask = 0x00ffffff, 1496 .data = &variant_u300, 1497 }, 1498 { 1499 .id = 0x00480180, 1500 .mask = 0xf0ffffff, 1501 .data = &variant_ux500, 1502 }, 1503 { 1504 .id = 0x10480180, 1505 .mask = 0xf0ffffff, 1506 .data = &variant_ux500v2, 1507 }, 1508 { 0, 0 }, 1509 }; 1510 1511 MODULE_DEVICE_TABLE(amba, mmci_ids); 1512 1513 static struct amba_driver mmci_driver = { 1514 .drv = { 1515 .name = DRIVER_NAME, 1516 }, 1517 .probe = mmci_probe, 1518 .remove = __devexit_p(mmci_remove), 1519 .suspend = mmci_suspend, 1520 .resume = mmci_resume, 1521 .id_table = mmci_ids, 1522 }; 1523 1524 static int __init mmci_init(void) 1525 { 1526 return amba_driver_register(&mmci_driver); 1527 } 1528 1529 static void __exit mmci_exit(void) 1530 { 1531 amba_driver_unregister(&mmci_driver); 1532 } 1533 1534 module_init(mmci_init); 1535 module_exit(mmci_exit); 1536 module_param(fmax, uint, 0444); 1537 1538 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); 1539 MODULE_LICENSE("GPL"); 1540