/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

/* Default maximum card bus clock in Hz; may be overridden by platform data */
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the
 *		      datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	mmci_write_clkreg(host, clk);
}
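
/*
 * Worked example for the divider maths above (illustrative numbers, not
 * taken from any particular board): with an ST variant running
 * mclk = 100 MHz and a desired card clock of 400 kHz,
 *
 *	clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248
 *	cclk   = 100000000 / (248 + 2)               = 400000 Hz
 *
 * while the plain PL180 equation gives
 *
 *	clkdiv = 100000000 / (2 * 400000) - 1        = 124
 *	cclk   = 100000000 / (2 * (124 + 1))         = 400000 Hz
 */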

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface;
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
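
/*
 * For reference, a sketch of the platform glue that feeds the setup code
 * above. pl08x_filter_id() and the channel names are assumptions that
 * depend entirely on the DMA controller wired to the MMCI instance:
 *
 *	static struct mmci_platform_data mmci_plat = {
 *		...
 *		.dma_filter	= pl08x_filter_id,
 *		.dma_rx_param	= "mmci_rx",
 *		.dma_tx_param	= "mmci_tx",
 *	};
 */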

/*
 * This is only used from __devinit or __devexit paths, so mark it
 * inline so that it can be discarded along with its callers.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Scatter-gather DMA evidently doesn't work with this controller:
	 * give up on DMA and switch back to PIO mode for future requests.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}
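
/*
 * A note on the maxburst values above: fifohalfsize is in bytes while the
 * DMA engine expects a word count, hence the >> 2. E.g. on the ux500
 * variants fifohalfsize = 8 * 4 = 32 bytes, giving a burst of 8 words.
 */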

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
			   " host->next_data.cookie %d\n",
			   __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if configured for DMA */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}
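
/*
 * The cookie expression above is worth spelling out: cookies are kept
 * strictly positive so that 0 can mean "no prepared job". If the
 * increment ever overflows to a negative value, the cookie handed out
 * snaps back to 1 rather than going negative or hitting the reserved 0.
 */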

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if configured for DMA */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	/*
	 * Attempt to use DMA operation mode; if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
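
/*
 * Illustration of the datactrl block size encoding above, using a
 * typical 512-byte block: the classic layout stores log2(blksz), i.e.
 * 9 << 4, while the blksz_datactrl16 variants store the raw byte count,
 * i.e. 512 << 16.
 */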

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop)
			mmci_request_end(host, data->mrq);
		else
			mmci_start_command(host, data->stop, 0);
	}
}
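
/*
 * To illustrate the error accounting above with made-up numbers: for an
 * 8 x 512-byte transfer where MMCIDATACNT reads back 700 on error,
 * success = 4096 - 700 = 3396 bytes, which round_down() trims to
 * 3072 bytes, i.e. the six blocks known to have fully transferred.
 */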

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				readsl(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				readsl(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			readsl(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
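
/*
 * A note on the MMCIFIFOCNT arithmetic above: the register counts the
 * words still to be transferred, so with host->size = 512 bytes and a
 * readback of 120 words (480 bytes) outstanding, 512 - 480 = 32 bytes
 * are sitting in the FIFO ready to be read. The numbers are only
 * illustrative.
 */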

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio && host->mmc->card &&
		    mmc_card_sdio(host->mmc->card)) {
			u32 clk;
			if (count < 8)
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
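
/*
 * On variants wired with a single interrupt line (host->singleirq),
 * the PIO events normally delivered on IRQ1 arrive here instead, so the
 * handler above peeks at MMCIMASK1 and dispatches to mmci_pio_irq() by
 * hand before stripping those bits from the status word.
 */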

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare, so we print an error
				 * and return here.
				 */
				goto out;
			}
		}
		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

 out:
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}
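
/*
 * Example of the positive-logic rule above: an active-low detect GPIO
 * reads 0 with a card inserted, so such a platform sets cd_invert and
 * !!0 ^ 1 = 1 correctly reports "card present".
 */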

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
				const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
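
	/*
	 * For example (illustrative rate): with mclk = 100 MHz the ST
	 * divider bottoms out at DIV_ROUND_UP(100000000, 257) = 389106 Hz,
	 * whereas the ARM divider reaches
	 * DIV_ROUND_UP(100000000, 512) = 195313 Hz.
	 */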

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
					 "Provided ocr_mask/setpower will not be used "
					 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/*
	 * We can do scatter/gather I/O.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num - 1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;
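
	/*
	 * With 24 datalength bits (the ux500 variants), the numbers above
	 * work out to max_req_size = 2^24 - 1 = 16777215 bytes and
	 * max_blk_count = 16777215 >> 11 = 8191 blocks of the maximum
	 * 2048-byte block size.
	 */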

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ || !dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_unprep:
	clk_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
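
/* Document the module parameter declared above */
MODULE_PARM_DESC(fmax, "maximum card bus frequency in Hz (default 515633)");

/*
 * Usage note (a sketch; the exact name depends on how the module is
 * built, assumed here to be mmci.ko): the bus clock ceiling set by fmax
 * can be lowered at load time, e.g.
 *
 *	modprobe mmci fmax=26000000
 *
 * or with mmci.fmax=26000000 on the kernel command line when built in.
 */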