/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

/*
 * Default maximum card clock frequency in Hz.
 * NOTE(review): presumably exposed as a module parameter; the
 * module_param() declaration is outside this view -- confirm.
 */
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl
 *		      register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 */
struct variant_data {
	unsigned int clkreg;
	unsigned int clkreg_enable;
	unsigned int datalength_bits;
	unsigned int fifosize;
	unsigned int fifohalfsize;
	bool sdio;
	bool st_clkdiv;
	bool blksz_datactrl16;
	u32 pwrreg_powerup;
	bool signal_direction;
};

/* Original ARM PL180: small FIFO, 16-bit data length, MCI_PWR_UP step. */
static struct variant_data variant_arm = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.datalength_bits = 16,
	.pwrreg_powerup = MCI_PWR_UP,
};

/* ARM variant with the larger 128-word FIFO. */
static struct variant_data variant_arm_extended_fifo = {
	.fifosize = 128 * 4,
	.fifohalfsize = 64 * 4,
	.datalength_bits = 16,
	.pwrreg_powerup = MCI_PWR_UP,
};

/* ST-Ericsson U300: SDIO capable, HW flow control, powers straight on. */
static struct variant_data variant_u300 = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg_enable = MCI_ST_U300_HWFCEN,
	.datalength_bits = 16,
	.sdio = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
};

/* ST-Ericsson Nomadik: 24-bit data length, ST clock divider. */
static struct variant_data variant_nomadik = {
	.fifosize = 16 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.datalength_bits = 24,
	.sdio = true,
	.st_clkdiv = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
};

/* ST-Ericsson Ux500: 30-word FIFO, ST clock divider and HW flow control. */
static struct variant_data variant_ux500 = {
	.fifosize = 30 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.datalength_bits = 24,
	.sdio = true,
	.st_clkdiv = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
};

/* Ux500v2: as Ux500 but block size lives at b16..b30 of DATACTRL. */
static struct variant_data variant_ux500v2 = {
	.fifosize = 30 * 4,
	.fifohalfsize = 8 * 4,
	.clkreg = MCI_CLK_ENABLE,
	.clkreg_enable = MCI_ST_UX500_HWFCEN,
	.datalength_bits = 24,
	.sdio = true,
	.st_clkdiv = true,
	.blksz_datactrl16 = true,
	.pwrreg_powerup = MCI_PWR_ON,
	.signal_direction = true,
};

/*
 * Write to the MMCICLOCK register, skipping the write if the cached
 * value is already current.
 *
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * Write to the MMCIPOWER register, skipping the write if the cached
 * value is already current.
 *
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * Program the clock register for the desired card clock frequency,
 * picking bypass mode or the variant-specific divider equation, and
 * record the resulting effective clock in host->cclk.
 *
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			/* Card clock >= block clock: bypass the divider */
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Bus width bits live in the clock register on this block */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	mmci_write_clkreg(host, clk);
}

/*
 * Finish off a request: disable the command path, hand the request
 * back to the MMC core and let runtime PM autosuspend the device.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	/* Data must already have been torn down by mmci_stop_data() */
	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

/*
 * Set the "IRQ1" (PIO) interrupt mask.  On hosts wired with a single
 * combined interrupt line the IRQ1 sources are mirrored into MASK0 as
 * well, so both register copies are kept in sync.
 */
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

/*
 * Disable the data path and all PIO interrupts after a data transfer
 * has ended or failed.
 */
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

/*
 * Start a scatter-gather mapping iterator over the request's buffers
 * for PIO transfers.  SG_MITER_ATOMIC is required because the iterator
 * is advanced from the PIO interrupt handler.
 */
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
/*
 * Acquire DMA engine slave channels from platform data and clamp the
 * MMC maximum segment size to what the DMA engine(s) can handle.
 * DMA is optional: on any failure the driver simply stays in PIO mode.
 */
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		/* Reuse the RX channel for TX (may be NULL) */
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
340 */ 341 static inline void mmci_dma_release(struct mmci_host *host) 342 { 343 struct mmci_platform_data *plat = host->plat; 344 345 if (host->dma_rx_channel) 346 dma_release_channel(host->dma_rx_channel); 347 if (host->dma_tx_channel && plat->dma_tx_param) 348 dma_release_channel(host->dma_tx_channel); 349 host->dma_rx_channel = host->dma_tx_channel = NULL; 350 } 351 352 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 353 { 354 struct dma_chan *chan = host->dma_current; 355 enum dma_data_direction dir; 356 u32 status; 357 int i; 358 359 /* Wait up to 1ms for the DMA to complete */ 360 for (i = 0; ; i++) { 361 status = readl(host->base + MMCISTATUS); 362 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 363 break; 364 udelay(10); 365 } 366 367 /* 368 * Check to see whether we still have some data left in the FIFO - 369 * this catches DMA controllers which are unable to monitor the 370 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- 371 * contiguous buffers. On TX, we'll get a FIFO underrun error. 372 */ 373 if (status & MCI_RXDATAAVLBLMASK) { 374 dmaengine_terminate_all(chan); 375 if (!data->error) 376 data->error = -EIO; 377 } 378 379 if (data->flags & MMC_DATA_WRITE) { 380 dir = DMA_TO_DEVICE; 381 } else { 382 dir = DMA_FROM_DEVICE; 383 } 384 385 if (!data->host_cookie) 386 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 387 388 /* 389 * Use of DMA with scatter-gather is impossible. 390 * Give up with DMA and switch back to PIO mode. 391 */ 392 if (status & MCI_RXDATAAVLBLMASK) { 393 dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); 394 mmci_dma_release(host); 395 } 396 } 397 398 static void mmci_dma_data_error(struct mmci_host *host) 399 { 400 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 401 dmaengine_terminate_all(host->dma_current); 402 } 403 404 static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, 405 struct mmci_host_next *next) 406 { 407 struct variant_data *variant = host->variant; 408 struct dma_slave_config conf = { 409 .src_addr = host->phybase + MMCIFIFO, 410 .dst_addr = host->phybase + MMCIFIFO, 411 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 412 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 413 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 414 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 415 .device_fc = false, 416 }; 417 struct dma_chan *chan; 418 struct dma_device *device; 419 struct dma_async_tx_descriptor *desc; 420 enum dma_data_direction buffer_dirn; 421 int nr_sg; 422 423 /* Check if next job is already prepared */ 424 if (data->host_cookie && !next && 425 host->dma_current && host->dma_desc_current) 426 return 0; 427 428 if (!next) { 429 host->dma_current = NULL; 430 host->dma_desc_current = NULL; 431 } 432 433 if (data->flags & MMC_DATA_READ) { 434 conf.direction = DMA_DEV_TO_MEM; 435 buffer_dirn = DMA_FROM_DEVICE; 436 chan = host->dma_rx_channel; 437 } else { 438 conf.direction = DMA_MEM_TO_DEV; 439 buffer_dirn = DMA_TO_DEVICE; 440 chan = host->dma_tx_channel; 441 } 442 443 /* If there's no DMA channel, fall back to PIO */ 444 if (!chan) 445 return -EINVAL; 446 447 /* If less than or equal to the fifo size, don't bother with DMA */ 448 if (data->blksz * data->blocks <= variant->fifosize) 449 return -EINVAL; 450 451 device = chan->device; 452 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 453 if (nr_sg == 0) 454 return -EINVAL; 455 456 dmaengine_slave_config(chan, &conf); 457 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, 458 conf.direction, 
DMA_CTRL_ACK); 459 if (!desc) 460 goto unmap_exit; 461 462 if (next) { 463 next->dma_chan = chan; 464 next->dma_desc = desc; 465 } else { 466 host->dma_current = chan; 467 host->dma_desc_current = desc; 468 } 469 470 return 0; 471 472 unmap_exit: 473 if (!next) 474 dmaengine_terminate_all(chan); 475 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 476 return -ENOMEM; 477 } 478 479 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 480 { 481 int ret; 482 struct mmc_data *data = host->data; 483 484 ret = mmci_dma_prep_data(host, host->data, NULL); 485 if (ret) 486 return ret; 487 488 /* Okay, go for it. */ 489 dev_vdbg(mmc_dev(host->mmc), 490 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 491 data->sg_len, data->blksz, data->blocks, data->flags); 492 dmaengine_submit(host->dma_desc_current); 493 dma_async_issue_pending(host->dma_current); 494 495 datactrl |= MCI_DPSM_DMAENABLE; 496 497 /* Trigger the DMA transfer */ 498 writel(datactrl, host->base + MMCIDATACTRL); 499 500 /* 501 * Let the MMCI say when the data is ended and it's time 502 * to fire next DMA request. 
When that happens, MMCI will 503 * call mmci_data_end() 504 */ 505 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 506 host->base + MMCIMASK0); 507 return 0; 508 } 509 510 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 511 { 512 struct mmci_host_next *next = &host->next_data; 513 514 if (data->host_cookie && data->host_cookie != next->cookie) { 515 pr_warning("[%s] invalid cookie: data->host_cookie %d" 516 " host->next_data.cookie %d\n", 517 __func__, data->host_cookie, host->next_data.cookie); 518 data->host_cookie = 0; 519 } 520 521 if (!data->host_cookie) 522 return; 523 524 host->dma_desc_current = next->dma_desc; 525 host->dma_current = next->dma_chan; 526 527 next->dma_desc = NULL; 528 next->dma_chan = NULL; 529 } 530 531 static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, 532 bool is_first_req) 533 { 534 struct mmci_host *host = mmc_priv(mmc); 535 struct mmc_data *data = mrq->data; 536 struct mmci_host_next *nd = &host->next_data; 537 538 if (!data) 539 return; 540 541 if (data->host_cookie) { 542 data->host_cookie = 0; 543 return; 544 } 545 546 /* if config for dma */ 547 if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || 548 ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) { 549 if (mmci_dma_prep_data(host, data, nd)) 550 data->host_cookie = 0; 551 else 552 data->host_cookie = ++nd->cookie < 0 ? 
1 : nd->cookie; 553 } 554 } 555 556 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, 557 int err) 558 { 559 struct mmci_host *host = mmc_priv(mmc); 560 struct mmc_data *data = mrq->data; 561 struct dma_chan *chan; 562 enum dma_data_direction dir; 563 564 if (!data) 565 return; 566 567 if (data->flags & MMC_DATA_READ) { 568 dir = DMA_FROM_DEVICE; 569 chan = host->dma_rx_channel; 570 } else { 571 dir = DMA_TO_DEVICE; 572 chan = host->dma_tx_channel; 573 } 574 575 576 /* if config for dma */ 577 if (chan) { 578 if (err) 579 dmaengine_terminate_all(chan); 580 if (data->host_cookie) 581 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 582 data->sg_len, dir); 583 mrq->data->host_cookie = 0; 584 } 585 } 586 587 #else 588 /* Blank functions if the DMA engine is not available */ 589 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 590 { 591 } 592 static inline void mmci_dma_setup(struct mmci_host *host) 593 { 594 } 595 596 static inline void mmci_dma_release(struct mmci_host *host) 597 { 598 } 599 600 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 601 { 602 } 603 604 static inline void mmci_dma_data_error(struct mmci_host *host) 605 { 606 } 607 608 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 609 { 610 return -ENOSYS; 611 } 612 613 #define mmci_pre_request NULL 614 #define mmci_post_request NULL 615 616 #endif 617 618 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 619 { 620 struct variant_data *variant = host->variant; 621 unsigned int datactrl, timeout, irqmask; 622 unsigned long long clks; 623 void __iomem *base; 624 int blksz_bits; 625 626 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 627 data->blksz, data->blocks, data->flags); 628 629 host->data = data; 630 host->size = data->blksz * data->blocks; 631 data->bytes_xfered = 0; 632 633 clks = (unsigned long long)data->timeout_ns * host->cclk; 634 
do_div(clks, 1000000000UL); 635 636 timeout = data->timeout_clks + (unsigned int)clks; 637 638 base = host->base; 639 writel(timeout, base + MMCIDATATIMER); 640 writel(host->size, base + MMCIDATALENGTH); 641 642 blksz_bits = ffs(data->blksz) - 1; 643 BUG_ON(1 << blksz_bits != data->blksz); 644 645 if (variant->blksz_datactrl16) 646 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); 647 else 648 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 649 650 if (data->flags & MMC_DATA_READ) 651 datactrl |= MCI_DPSM_DIRECTION; 652 653 /* The ST Micro variants has a special bit to enable SDIO */ 654 if (variant->sdio && host->mmc->card) 655 if (mmc_card_sdio(host->mmc->card)) 656 datactrl |= MCI_ST_DPSM_SDIOEN; 657 658 /* 659 * Attempt to use DMA operation mode, if this 660 * should fail, fall back to PIO mode 661 */ 662 if (!mmci_dma_start_data(host, datactrl)) 663 return; 664 665 /* IRQ mode, map the SG list for CPU reading/writing */ 666 mmci_init_sg(host, data); 667 668 if (data->flags & MMC_DATA_READ) { 669 irqmask = MCI_RXFIFOHALFFULLMASK; 670 671 /* 672 * If we have less than the fifo 'half-full' threshold to 673 * transfer, trigger a PIO interrupt as soon as any data 674 * is available. 675 */ 676 if (host->size < variant->fifohalfsize) 677 irqmask |= MCI_RXDATAAVLBLMASK; 678 } else { 679 /* 680 * We don't actually need to include "FIFO empty" here 681 * since its implicit in "FIFO half empty". 
682 */ 683 irqmask = MCI_TXFIFOHALFEMPTYMASK; 684 } 685 686 writel(datactrl, base + MMCIDATACTRL); 687 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 688 mmci_set_mask1(host, irqmask); 689 } 690 691 static void 692 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 693 { 694 void __iomem *base = host->base; 695 696 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 697 cmd->opcode, cmd->arg, cmd->flags); 698 699 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 700 writel(0, base + MMCICOMMAND); 701 udelay(1); 702 } 703 704 c |= cmd->opcode | MCI_CPSM_ENABLE; 705 if (cmd->flags & MMC_RSP_PRESENT) { 706 if (cmd->flags & MMC_RSP_136) 707 c |= MCI_CPSM_LONGRSP; 708 c |= MCI_CPSM_RESPONSE; 709 } 710 if (/*interrupt*/0) 711 c |= MCI_CPSM_INTERRUPT; 712 713 host->cmd = cmd; 714 715 writel(cmd->arg, base + MMCIARGUMENT); 716 writel(c, base + MMCICOMMAND); 717 } 718 719 static void 720 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 721 unsigned int status) 722 { 723 /* First check for errors */ 724 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 725 MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 726 u32 remain, success; 727 728 /* Terminate the DMA transfer */ 729 if (dma_inprogress(host)) 730 mmci_dma_data_error(host); 731 732 /* 733 * Calculate how far we are into the transfer. Note that 734 * the data counter gives the number of bytes transferred 735 * on the MMC bus, not on the host side. On reads, this 736 * can be as much as a FIFO-worth of data ahead. This 737 * matters for FIFO overruns only. 
738 */ 739 remain = readl(host->base + MMCIDATACNT); 740 success = data->blksz * data->blocks - remain; 741 742 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 743 status, success); 744 if (status & MCI_DATACRCFAIL) { 745 /* Last block was not successful */ 746 success -= 1; 747 data->error = -EILSEQ; 748 } else if (status & MCI_DATATIMEOUT) { 749 data->error = -ETIMEDOUT; 750 } else if (status & MCI_STARTBITERR) { 751 data->error = -ECOMM; 752 } else if (status & MCI_TXUNDERRUN) { 753 data->error = -EIO; 754 } else if (status & MCI_RXOVERRUN) { 755 if (success > host->variant->fifosize) 756 success -= host->variant->fifosize; 757 else 758 success = 0; 759 data->error = -EIO; 760 } 761 data->bytes_xfered = round_down(success, data->blksz); 762 } 763 764 if (status & MCI_DATABLOCKEND) 765 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 766 767 if (status & MCI_DATAEND || data->error) { 768 if (dma_inprogress(host)) 769 mmci_dma_unmap(host, data); 770 mmci_stop_data(host); 771 772 if (!data->error) 773 /* The error clause is handled above, success! 
*/ 774 data->bytes_xfered = data->blksz * data->blocks; 775 776 if (!data->stop) { 777 mmci_request_end(host, data->mrq); 778 } else { 779 mmci_start_command(host, data->stop, 0); 780 } 781 } 782 } 783 784 static void 785 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 786 unsigned int status) 787 { 788 void __iomem *base = host->base; 789 790 host->cmd = NULL; 791 792 if (status & MCI_CMDTIMEOUT) { 793 cmd->error = -ETIMEDOUT; 794 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 795 cmd->error = -EILSEQ; 796 } else { 797 cmd->resp[0] = readl(base + MMCIRESPONSE0); 798 cmd->resp[1] = readl(base + MMCIRESPONSE1); 799 cmd->resp[2] = readl(base + MMCIRESPONSE2); 800 cmd->resp[3] = readl(base + MMCIRESPONSE3); 801 } 802 803 if (!cmd->data || cmd->error) { 804 if (host->data) { 805 /* Terminate the DMA transfer */ 806 if (dma_inprogress(host)) 807 mmci_dma_data_error(host); 808 mmci_stop_data(host); 809 } 810 mmci_request_end(host, cmd->mrq); 811 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 812 mmci_start_data(host, cmd->data); 813 } 814 } 815 816 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 817 { 818 void __iomem *base = host->base; 819 char *ptr = buffer; 820 u32 status; 821 int host_remain = host->size; 822 823 do { 824 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 825 826 if (count > remain) 827 count = remain; 828 829 if (count <= 0) 830 break; 831 832 /* 833 * SDIO especially may want to send something that is 834 * not divisible by 4 (as opposed to card sectors 835 * etc). Therefore make sure to always read the last bytes 836 * while only doing full 32-bit reads towards the FIFO. 
837 */ 838 if (unlikely(count & 0x3)) { 839 if (count < 4) { 840 unsigned char buf[4]; 841 readsl(base + MMCIFIFO, buf, 1); 842 memcpy(ptr, buf, count); 843 } else { 844 readsl(base + MMCIFIFO, ptr, count >> 2); 845 count &= ~0x3; 846 } 847 } else { 848 readsl(base + MMCIFIFO, ptr, count >> 2); 849 } 850 851 ptr += count; 852 remain -= count; 853 host_remain -= count; 854 855 if (remain == 0) 856 break; 857 858 status = readl(base + MMCISTATUS); 859 } while (status & MCI_RXDATAAVLBL); 860 861 return ptr - buffer; 862 } 863 864 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 865 { 866 struct variant_data *variant = host->variant; 867 void __iomem *base = host->base; 868 char *ptr = buffer; 869 870 do { 871 unsigned int count, maxcnt; 872 873 maxcnt = status & MCI_TXFIFOEMPTY ? 874 variant->fifosize : variant->fifohalfsize; 875 count = min(remain, maxcnt); 876 877 /* 878 * The ST Micro variant for SDIO transfer sizes 879 * less then 8 bytes should have clock H/W flow 880 * control disabled. 881 */ 882 if (variant->sdio && 883 mmc_card_sdio(host->mmc->card)) { 884 u32 clk; 885 if (count < 8) 886 clk = host->clk_reg & ~variant->clkreg_enable; 887 else 888 clk = host->clk_reg | variant->clkreg_enable; 889 890 mmci_write_clkreg(host, clk); 891 } 892 893 /* 894 * SDIO especially may want to send something that is 895 * not divisible by 4 (as opposed to card sectors 896 * etc), and the FIFO only accept full 32-bit writes. 897 * So compensate by adding +3 on the count, a single 898 * byte become a 32bit write, 7 bytes will be two 899 * 32bit writes etc. 900 */ 901 writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); 902 903 ptr += count; 904 remain -= count; 905 906 if (remain == 0) 907 break; 908 909 status = readl(base + MMCISTATUS); 910 } while (status & MCI_TXFIFOHALFEMPTY); 911 912 return ptr - buffer; 913 } 914 915 /* 916 * PIO data transfer IRQ handler. 
917 */ 918 static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 919 { 920 struct mmci_host *host = dev_id; 921 struct sg_mapping_iter *sg_miter = &host->sg_miter; 922 struct variant_data *variant = host->variant; 923 void __iomem *base = host->base; 924 unsigned long flags; 925 u32 status; 926 927 status = readl(base + MMCISTATUS); 928 929 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 930 931 local_irq_save(flags); 932 933 do { 934 unsigned int remain, len; 935 char *buffer; 936 937 /* 938 * For write, we only need to test the half-empty flag 939 * here - if the FIFO is completely empty, then by 940 * definition it is more than half empty. 941 * 942 * For read, check for data available. 943 */ 944 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 945 break; 946 947 if (!sg_miter_next(sg_miter)) 948 break; 949 950 buffer = sg_miter->addr; 951 remain = sg_miter->length; 952 953 len = 0; 954 if (status & MCI_RXACTIVE) 955 len = mmci_pio_read(host, buffer, remain); 956 if (status & MCI_TXACTIVE) 957 len = mmci_pio_write(host, buffer, remain, status); 958 959 sg_miter->consumed = len; 960 961 host->size -= len; 962 remain -= len; 963 964 if (remain) 965 break; 966 967 status = readl(base + MMCISTATUS); 968 } while (1); 969 970 sg_miter_stop(sg_miter); 971 972 local_irq_restore(flags); 973 974 /* 975 * If we have less than the fifo 'half-full' threshold to transfer, 976 * trigger a PIO interrupt as soon as any data is available. 977 */ 978 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 979 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 980 981 /* 982 * If we run out of data, disable the data IRQs; this 983 * prevents a race where the FIFO becomes empty before 984 * the chip itself has disabled the data path, and 985 * stops us racing with our data end IRQ. 
986 */ 987 if (host->size == 0) { 988 mmci_set_mask1(host, 0); 989 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 990 } 991 992 return IRQ_HANDLED; 993 } 994 995 /* 996 * Handle completion of command and data transfers. 997 */ 998 static irqreturn_t mmci_irq(int irq, void *dev_id) 999 { 1000 struct mmci_host *host = dev_id; 1001 u32 status; 1002 int ret = 0; 1003 1004 spin_lock(&host->lock); 1005 1006 do { 1007 struct mmc_command *cmd; 1008 struct mmc_data *data; 1009 1010 status = readl(host->base + MMCISTATUS); 1011 1012 if (host->singleirq) { 1013 if (status & readl(host->base + MMCIMASK1)) 1014 mmci_pio_irq(irq, dev_id); 1015 1016 status &= ~MCI_IRQ1MASK; 1017 } 1018 1019 status &= readl(host->base + MMCIMASK0); 1020 writel(status, host->base + MMCICLEAR); 1021 1022 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 1023 1024 data = host->data; 1025 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 1026 MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| 1027 MCI_DATABLOCKEND) && data) 1028 mmci_data_irq(host, data, status); 1029 1030 cmd = host->cmd; 1031 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) 1032 mmci_cmd_irq(host, cmd, status); 1033 1034 ret = 1; 1035 } while (status); 1036 1037 spin_unlock(&host->lock); 1038 1039 return IRQ_RETVAL(ret); 1040 } 1041 1042 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1043 { 1044 struct mmci_host *host = mmc_priv(mmc); 1045 unsigned long flags; 1046 1047 WARN_ON(host->mrq != NULL); 1048 1049 if (mrq->data && !is_power_of_2(mrq->data->blksz)) { 1050 dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", 1051 mrq->data->blksz); 1052 mrq->cmd->error = -EINVAL; 1053 mmc_request_done(mmc, mrq); 1054 return; 1055 } 1056 1057 pm_runtime_get_sync(mmc_dev(mmc)); 1058 1059 spin_lock_irqsave(&host->lock, flags); 1060 1061 host->mrq = mrq; 1062 1063 if (mrq->data) 1064 mmci_get_next_data(host, mrq->data); 1065 1066 if (mrq->data 
&& mrq->data->flags & MMC_DATA_READ) 1067 mmci_start_data(host, mrq->data); 1068 1069 mmci_start_command(host, mrq->cmd, 0); 1070 1071 spin_unlock_irqrestore(&host->lock, flags); 1072 } 1073 1074 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1075 { 1076 struct mmci_host *host = mmc_priv(mmc); 1077 struct variant_data *variant = host->variant; 1078 u32 pwr = 0; 1079 unsigned long flags; 1080 int ret; 1081 1082 pm_runtime_get_sync(mmc_dev(mmc)); 1083 1084 if (host->plat->ios_handler && 1085 host->plat->ios_handler(mmc_dev(mmc), ios)) 1086 dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); 1087 1088 switch (ios->power_mode) { 1089 case MMC_POWER_OFF: 1090 if (host->vcc) 1091 ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); 1092 break; 1093 case MMC_POWER_UP: 1094 if (host->vcc) { 1095 ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); 1096 if (ret) { 1097 dev_err(mmc_dev(mmc), "unable to set OCR\n"); 1098 /* 1099 * The .set_ios() function in the mmc_host_ops 1100 * struct return void, and failing to set the 1101 * power should be rare so we print an error 1102 * and return here. 1103 */ 1104 goto out; 1105 } 1106 } 1107 /* 1108 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP 1109 * and instead uses MCI_PWR_ON so apply whatever value is 1110 * configured in the variant data. 1111 */ 1112 pwr |= variant->pwrreg_powerup; 1113 1114 break; 1115 case MMC_POWER_ON: 1116 pwr |= MCI_PWR_ON; 1117 break; 1118 } 1119 1120 if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { 1121 /* 1122 * The ST Micro variant has some additional bits 1123 * indicating signal direction for the signals in 1124 * the SD/MMC bus and feedback-clock usage. 
1125 */ 1126 pwr |= host->plat->sigdir; 1127 1128 if (ios->bus_width == MMC_BUS_WIDTH_4) 1129 pwr &= ~MCI_ST_DATA74DIREN; 1130 else if (ios->bus_width == MMC_BUS_WIDTH_1) 1131 pwr &= (~MCI_ST_DATA74DIREN & 1132 ~MCI_ST_DATA31DIREN & 1133 ~MCI_ST_DATA2DIREN); 1134 } 1135 1136 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 1137 if (host->hw_designer != AMBA_VENDOR_ST) 1138 pwr |= MCI_ROD; 1139 else { 1140 /* 1141 * The ST Micro variant use the ROD bit for something 1142 * else and only has OD (Open Drain). 1143 */ 1144 pwr |= MCI_OD; 1145 } 1146 } 1147 1148 spin_lock_irqsave(&host->lock, flags); 1149 1150 mmci_set_clkreg(host, ios->clock); 1151 mmci_write_pwrreg(host, pwr); 1152 1153 spin_unlock_irqrestore(&host->lock, flags); 1154 1155 out: 1156 pm_runtime_mark_last_busy(mmc_dev(mmc)); 1157 pm_runtime_put_autosuspend(mmc_dev(mmc)); 1158 } 1159 1160 static int mmci_get_ro(struct mmc_host *mmc) 1161 { 1162 struct mmci_host *host = mmc_priv(mmc); 1163 1164 if (host->gpio_wp == -ENOSYS) 1165 return -ENOSYS; 1166 1167 return gpio_get_value_cansleep(host->gpio_wp); 1168 } 1169 1170 static int mmci_get_cd(struct mmc_host *mmc) 1171 { 1172 struct mmci_host *host = mmc_priv(mmc); 1173 struct mmci_platform_data *plat = host->plat; 1174 unsigned int status; 1175 1176 if (host->gpio_cd == -ENOSYS) { 1177 if (!plat->status) 1178 return 1; /* Assume always present */ 1179 1180 status = plat->status(mmc_dev(host->mmc)); 1181 } else 1182 status = !!gpio_get_value_cansleep(host->gpio_cd) 1183 ^ plat->cd_invert; 1184 1185 /* 1186 * Use positive logic throughout - status is zero for no card, 1187 * non-zero for card inserted. 
	 */
	return status;
}

/*
 * Card-detect GPIO interrupt handler: kick off a (debounced) rescan
 * of the slot by the MMC core.
 */
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	/* 500 ms debounce before the core re-examines the slot */
	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

/* Host operations handed to the MMC core */
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

/*
 * Probe one MMCI primecell: claim its region, set up the clock,
 * map the registers, configure host capabilities/limits from the
 * variant and platform data, wire up card-detect/write-protect
 * GPIOs and IRQs, then register the MMC host.  On failure, the
 * goto labels later in the function unwind acquisitions in reverse
 * order.
 */
static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -ENOSYS marks "no such GPIO configured" throughout the driver */
	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		/* Re-read: the clock framework may have picked a nearby rate */
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
					 "Provided ocr_mask/setpower will not be used "
					 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	/* Mask all interrupts and clear any pending status before wiring IRQs */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	/* Without a card-detect IRQ we must fall back to polling */
	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	/* Some platforms route cmd and PIO onto a single IRQ line */
	if (dev->irq[1] == NO_IRQ || !dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

	/* Error unwind: each label releases what was acquired before it */
 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_unprep:
	clk_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

/*
 * Tear down one MMCI instance: unregister the host, quiesce the
 * hardware, then release resources in the reverse order of probe.
 */
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		/* Mask all interrupts and stop any command/data activity */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		/* NOTE(review): host->vcc may be NULL here; presumably
		 * regulator_put() tolerates that - confirm */
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
/*
 * System suspend: suspend the MMC host, then (on success) keep the
 * device runtime-resumed and mask all controller interrupts.
 */
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

/*
 * System resume: re-enable the controller interrupts, drop the
 * runtime-PM reference taken in suspend, and resume the MMC host.
 */
static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

/* Peripheral ID match table: each entry selects its variant_data */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

/* Read-only module parameter: default maximum operating frequency (Hz) */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");