/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *            is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *                is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size occupies bits 16..30 of the
 *                    datactrl register
 * @pwrreg_powerup: power up value for the MMCIPOWER register
 * @signal_direction: input/output direction of the bus signals can be
 *                    indicated
 */
struct variant_data {
        unsigned int            clkreg;
        unsigned int            clkreg_enable;
        unsigned int            datalength_bits;
        unsigned int            fifosize;
        unsigned int            fifohalfsize;
        bool                    sdio;
        bool                    st_clkdiv;
        bool                    blksz_datactrl16;
        u32                     pwrreg_powerup;
        bool                    signal_direction;
};

static struct variant_data variant_arm = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
        .fifosize               = 128 * 4,
        .fifohalfsize           = 64 * 4,
        .datalength_bits        = 16,
        .pwrreg_powerup         = MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg_enable          = MCI_ST_U300_HWFCEN,
        .datalength_bits        = 16,
        .sdio                   = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};

static struct variant_data variant_nomadik = {
        .fifosize               = 16 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};

static struct variant_data variant_ux500 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};
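/*
 * The ux500v2 variant below differs from plain ux500 mainly in how the
 * block size is programmed: it is written as a raw byte count into bits
 * 16..30 of the datactrl register instead of as a power-of-two exponent
 * (see the blksz_datactrl16 handling in mmci_start_data()).
 */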
static struct variant_data variant_ux500v2 = {
        .fifosize               = 30 * 4,
        .fifohalfsize           = 8 * 4,
        .clkreg                 = MCI_CLK_ENABLE,
        .clkreg_enable          = MCI_ST_UX500_HWFCEN,
        .datalength_bits        = 24,
        .sdio                   = true,
        .st_clkdiv              = true,
        .blksz_datactrl16       = true,
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
        if (host->clk_reg != clk) {
                host->clk_reg = clk;
                writel(clk, host->base + MMCICLOCK);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
        if (host->pwr_reg != pwr) {
                host->pwr_reg = pwr;
                writel(pwr, host->base + MMCIPOWER);
        }
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
        struct variant_data *variant = host->variant;
        u32 clk = variant->clkreg;

        if (desired) {
                if (desired >= host->mclk) {
                        clk = MCI_CLK_BYPASS;
                        if (variant->st_clkdiv)
                                clk |= MCI_ST_UX500_NEG_EDGE;
                        host->cclk = host->mclk;
                } else if (variant->st_clkdiv) {
                        /*
                         * DB8500 TRM says f = mclk / (clkdiv + 2)
                         * => clkdiv = (mclk / f) - 2
                         * Round the divider up so we don't exceed the max
                         * frequency
                         */
                        clk = DIV_ROUND_UP(host->mclk, desired) - 2;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (clk + 2);
                } else {
                        /*
                         * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
                         * => clkdiv = mclk / (2 * f) - 1
                         */
                        clk = host->mclk / (2 * desired) - 1;
                        if (clk >= 256)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
                }

                clk |= variant->clkreg_enable;
                clk |= MCI_CLK_ENABLE;
                /* This hasn't proven to be worthwhile */
                /* clk |= MCI_CLK_PWRSAVE; */
        }

        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                clk |= MCI_4BIT_BUS;
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= MCI_ST_8BIT_BUS;

        mmci_write_clkreg(host, clk);
}
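/*
 * Worked example of the divider maths above, for mclk = 100 MHz and a
 * desired rate of 400 kHz:
 *
 *   PL180:      clkdiv = 100000000 / (2 * 400000) - 1        = 124
 *               cclk   = 100000000 / (2 * (124 + 1))         = 400 kHz
 *   ST variant: clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248
 *               cclk   = 100000000 / (248 + 2)               = 400 kHz
 */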
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
        writel(0, host->base + MMCICOMMAND);

        BUG_ON(host->data);

        host->mrq = NULL;
        host->cmd = NULL;

        mmc_request_done(host->mmc, mrq);

        pm_runtime_mark_last_busy(mmc_dev(host->mmc));
        pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
        void __iomem *base = host->base;

        if (host->singleirq) {
                unsigned int mask0 = readl(base + MMCIMASK0);

                mask0 &= ~MCI_IRQ1MASK;
                mask0 |= mask;

                writel(mask0, base + MMCIMASK0);
        }

        writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
        writel(0, host->base + MMCIDATACTRL);
        mmci_set_mask1(host, 0);
        host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
        unsigned int flags = SG_MITER_ATOMIC;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;
        const char *rxname, *txname;
        dma_cap_mask_t mask;

        if (!plat || !plat->dma_filter) {
                dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
                return;
        }

        /* initialize pre-request cookie */
        host->next_data.cookie = 1;

        /* Try to acquire a generic DMA engine slave channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * If only an RX channel is specified, the driver will
         * attempt to use it bidirectionally, however if it
         * is specified but cannot be located, DMA will be disabled.
         */
        if (plat->dma_rx_param) {
                host->dma_rx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_rx_param);
                /* E.g. if no DMA hardware is present */
                if (!host->dma_rx_channel)
                        dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
        }

        if (plat->dma_tx_param) {
                host->dma_tx_channel = dma_request_channel(mask,
                                                           plat->dma_filter,
                                                           plat->dma_tx_param);
                if (!host->dma_tx_channel)
                        dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
        } else {
                host->dma_tx_channel = host->dma_rx_channel;
        }

        if (host->dma_rx_channel)
                rxname = dma_chan_name(host->dma_rx_channel);
        else
                rxname = "none";

        if (host->dma_tx_channel)
                txname = dma_chan_name(host->dma_tx_channel);
        else
                txname = "none";

        dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
                 rxname, txname);

        /*
         * Limit the maximum segment size in any SG entry according to
         * the parameters of the DMA engine device.
         */
        if (host->dma_tx_channel) {
                struct device *dev = host->dma_tx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
        if (host->dma_rx_channel) {
                struct device *dev = host->dma_rx_channel->device->dev;
                unsigned int max_seg_size = dma_get_max_seg_size(dev);

                if (max_seg_size < host->mmc->max_seg_size)
                        host->mmc->max_seg_size = max_seg_size;
        }
}

/*
 * This is used in __devinit/__devexit paths only, so inline it
 * so that it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
        struct mmci_platform_data *plat = host->plat;

        if (host->dma_rx_channel)
                dma_release_channel(host->dma_rx_channel);
        if (host->dma_tx_channel && plat->dma_tx_param)
                dma_release_channel(host->dma_tx_channel);
        host->dma_rx_channel = host->dma_tx_channel = NULL;
}
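/*
 * Tear down the DMA mapping once a transfer has ended. This also
 * polices DMA controllers that cannot monitor the DMALBREQ/DMALSREQ
 * burst signals: if data is still stuck in the FIFO after the 1 ms
 * grace period below, the channel is terminated and DMA is abandoned
 * for this host altogether.
 */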
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
        struct dma_chan *chan = host->dma_current;
        enum dma_data_direction dir;
        u32 status;
        int i;

        /* Wait up to 1ms for the DMA to complete */
        for (i = 0; ; i++) {
                status = readl(host->base + MMCISTATUS);
                if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
                        break;
                udelay(10);
        }

        /*
         * Check to see whether we still have some data left in the FIFO -
         * this catches DMA controllers which are unable to monitor the
         * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
         * contiguous buffers. On TX, we'll get a FIFO underrun error.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dmaengine_terminate_all(chan);
                if (!data->error)
                        data->error = -EIO;
        }

        if (data->flags & MMC_DATA_WRITE)
                dir = DMA_TO_DEVICE;
        else
                dir = DMA_FROM_DEVICE;

        if (!data->host_cookie)
                dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

        /*
         * Use of DMA with scatter-gather is impossible.
         * Give up on DMA and switch back to PIO mode.
         */
        if (status & MCI_RXDATAAVLBLMASK) {
                dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                mmci_dma_release(host);
        }
}

static void mmci_dma_data_error(struct mmci_host *host)
{
        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
        dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
                              struct mmci_host_next *next)
{
        struct variant_data *variant = host->variant;
        struct dma_slave_config conf = {
                .src_addr = host->phybase + MMCIFIFO,
                .dst_addr = host->phybase + MMCIFIFO,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .device_fc = false,
        };
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
        enum dma_data_direction buffer_dirn;
        int nr_sg;

        /* Check if next job is already prepared */
        if (data->host_cookie && !next &&
            host->dma_current && host->dma_desc_current)
                return 0;

        if (!next) {
                host->dma_current = NULL;
                host->dma_desc_current = NULL;
        }

        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_DEV_TO_MEM;
                buffer_dirn = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                conf.direction = DMA_MEM_TO_DEV;
                buffer_dirn = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        /* If there's no DMA channel, fall back to PIO */
        if (!chan)
                return -EINVAL;

        /* If less than or equal to the fifo size, don't bother with DMA */
        if (data->blksz * data->blocks <= variant->fifosize)
                return -EINVAL;

        device = chan->device;
        nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        if (nr_sg == 0)
                return -EINVAL;

        dmaengine_slave_config(chan, &conf);
        desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
                                       conf.direction, DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;

        if (next) {
                next->dma_chan = chan;
                next->dma_desc = desc;
        } else {
                host->dma_current = chan;
                host->dma_desc_current = desc;
        }

        return 0;

 unmap_exit:
        if (!next)
                dmaengine_terminate_all(chan);
        dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        return -ENOMEM;
}
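/*
 * A note on the descriptor double-buffering implemented above and below:
 * mmci_pre_request() can map and prepare a descriptor for the *next*
 * request while the current one is still in flight. A non-zero
 * host_cookie marks a request whose descriptor is parked in
 * host->next_data, and mmci_get_next_data() promotes it to
 * dma_current/dma_desc_current when that request is actually started.
 */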
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        int ret;
        struct mmc_data *data = host->data;

        ret = mmci_dma_prep_data(host, host->data, NULL);
        if (ret)
                return ret;

        /* Okay, go for it. */
        dev_vdbg(mmc_dev(host->mmc),
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
        dmaengine_submit(host->dma_desc_current);
        dma_async_issue_pending(host->dma_current);

        datactrl |= MCI_DPSM_DMAENABLE;

        /* Trigger the DMA transfer */
        writel(datactrl, host->base + MMCIDATACTRL);

        /*
         * Let the MMCI say when the data is ended and it's time
         * to fire the next DMA request. When that happens, MMCI will
         * call mmci_data_irq().
         */
        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
               host->base + MMCIMASK0);
        return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
        struct mmci_host_next *next = &host->next_data;

        if (data->host_cookie && data->host_cookie != next->cookie) {
                pr_warning("[%s] invalid cookie: data->host_cookie %d"
                           " host->next_data.cookie %d\n",
                           __func__, data->host_cookie, host->next_data.cookie);
                data->host_cookie = 0;
        }

        if (!data->host_cookie)
                return;

        host->dma_desc_current = next->dma_desc;
        host->dma_current = next->dma_chan;

        next->dma_desc = NULL;
        next->dma_chan = NULL;
}
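/*
 * The mmc core calls the .pre_req/.post_req hooks below around .request:
 * pre_req lets the scatterlist be DMA-mapped and the descriptor prepared
 * ahead of time, and post_req unmaps it after completion, keeping the
 * mapping cost off the critical path.
 */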
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
                             bool is_first_req)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
        struct mmci_host_next *nd = &host->next_data;

        if (!data)
                return;

        if (data->host_cookie) {
                data->host_cookie = 0;
                return;
        }

        /* if configured for DMA */
        if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
            ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
                if (mmci_dma_prep_data(host, data, nd))
                        data->host_cookie = 0;
                else
                        data->host_cookie = ++nd->cookie < 0 ?
                                1 : nd->cookie;
        }
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
                              int err)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
        struct dma_chan *chan;
        enum dma_data_direction dir;

        if (!data)
                return;

        if (data->flags & MMC_DATA_READ) {
                dir = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
                dir = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }

        /* if configured for DMA */
        if (chan) {
                if (err)
                        dmaengine_terminate_all(chan);
                /* Unmap with the same device that mapped the scatterlist */
                if (data->host_cookie)
                        dma_unmap_sg(chan->device->dev, data->sg,
                                     data->sg_len, dir);
                mrq->data->host_cookie = 0;
        }
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
        return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
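/*
 * Worked example of the data timeout programmed by mmci_start_data()
 * below: with cclk = 26 MHz and a card asking for timeout_ns = 100 ms,
 * clks = 100000000 * 26000000 / 1000000000 = 2600000 card clock cycles,
 * to which data->timeout_clks is added before the sum is written to
 * MMCIDATATIMER.
 */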
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
        struct variant_data *variant = host->variant;
        unsigned int datactrl, timeout, irqmask;
        unsigned long long clks;
        void __iomem *base;
        int blksz_bits;

        dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
                data->blksz, data->blocks, data->flags);

        host->data = data;
        host->size = data->blksz * data->blocks;
        data->bytes_xfered = 0;

        clks = (unsigned long long)data->timeout_ns * host->cclk;
        do_div(clks, 1000000000UL);

        timeout = data->timeout_clks + (unsigned int)clks;

        base = host->base;
        writel(timeout, base + MMCIDATATIMER);
        writel(host->size, base + MMCIDATALENGTH);

        blksz_bits = ffs(data->blksz) - 1;
        BUG_ON(1 << blksz_bits != data->blksz);

        if (variant->blksz_datactrl16)
                datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
        else
                datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

        if (data->flags & MMC_DATA_READ)
                datactrl |= MCI_DPSM_DIRECTION;

        /* The ST Micro variants have a special bit to enable SDIO */
        if (variant->sdio && host->mmc->card)
                if (mmc_card_sdio(host->mmc->card))
                        datactrl |= MCI_ST_DPSM_SDIOEN;

        /*
         * Attempt to use DMA operation mode; if this
         * fails, fall back to PIO mode
         */
        if (!mmci_dma_start_data(host, datactrl))
                return;

        /* IRQ mode, map the SG list for CPU reading/writing */
        mmci_init_sg(host, data);

        if (data->flags & MMC_DATA_READ) {
                irqmask = MCI_RXFIFOHALFFULLMASK;

                /*
                 * If we have less than the fifo 'half-full' threshold to
                 * transfer, trigger a PIO interrupt as soon as any data
                 * is available.
                 */
                if (host->size < variant->fifohalfsize)
                        irqmask |= MCI_RXDATAAVLBLMASK;
        } else {
                /*
                 * We don't actually need to include "FIFO empty" here
                 * since it's implicit in "FIFO half empty".
                 */
                irqmask = MCI_TXFIFOHALFEMPTYMASK;
        }

        writel(datactrl, base + MMCIDATACTRL);
        writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
        mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
        void __iomem *base = host->base;

        dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
                cmd->opcode, cmd->arg, cmd->flags);

        if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
                writel(0, base + MMCICOMMAND);
                udelay(1);
        }

        c |= cmd->opcode | MCI_CPSM_ENABLE;
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        c |= MCI_CPSM_LONGRSP;
                c |= MCI_CPSM_RESPONSE;
        }
        if (/*interrupt*/0)
                c |= MCI_CPSM_INTERRUPT;

        host->cmd = cmd;

        writel(cmd->arg, base + MMCIARGUMENT);
        writel(c, base + MMCICOMMAND);
}
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
{
        /* First check for errors */
        if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
                      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;

                /* Terminate the DMA transfer */
                if (dma_inprogress(host))
                        mmci_dma_data_error(host);

                /*
                 * Calculate how far we are into the transfer. Note that
                 * the data counter gives the number of bytes transferred
                 * on the MMC bus, not on the host side. On reads, this
                 * can be as much as a FIFO-worth of data ahead. This
                 * matters for FIFO overruns only.
                 */
                remain = readl(host->base + MMCIDATACNT);
                success = data->blksz * data->blocks - remain;

                dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
                        status, success);
                if (status & MCI_DATACRCFAIL) {
                        /* Last block was not successful */
                        success -= 1;
                        data->error = -EILSEQ;
                } else if (status & MCI_DATATIMEOUT) {
                        data->error = -ETIMEDOUT;
                } else if (status & MCI_STARTBITERR) {
                        data->error = -ECOMM;
                } else if (status & MCI_TXUNDERRUN) {
                        data->error = -EIO;
                } else if (status & MCI_RXOVERRUN) {
                        if (success > host->variant->fifosize)
                                success -= host->variant->fifosize;
                        else
                                success = 0;
                        data->error = -EIO;
                }
                data->bytes_xfered = round_down(success, data->blksz);
        }

        if (status & MCI_DATABLOCKEND)
                dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

        if (status & MCI_DATAEND || data->error) {
                if (dma_inprogress(host))
                        mmci_dma_unmap(host, data);
                mmci_stop_data(host);

                if (!data->error)
                        /* The error clause is handled above, success! */
                        data->bytes_xfered = data->blksz * data->blocks;

                if (!data->stop)
                        mmci_request_end(host, data->mrq);
                else
                        mmci_start_command(host, data->stop, 0);
        }
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
             unsigned int status)
{
        void __iomem *base = host->base;

        host->cmd = NULL;

        if (status & MCI_CMDTIMEOUT) {
                cmd->error = -ETIMEDOUT;
        } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
                cmd->error = -EILSEQ;
        } else {
                cmd->resp[0] = readl(base + MMCIRESPONSE0);
                cmd->resp[1] = readl(base + MMCIRESPONSE1);
                cmd->resp[2] = readl(base + MMCIRESPONSE2);
                cmd->resp[3] = readl(base + MMCIRESPONSE3);
        }

        if (!cmd->data || cmd->error) {
                if (host->data) {
                        /* Terminate the DMA transfer */
                        if (dma_inprogress(host))
                                mmci_dma_data_error(host);
                        mmci_stop_data(host);
                }
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
        }
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;
        int host_remain = host->size;

        do {
                int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

                if (count > remain)
                        count = remain;

                if (count <= 0)
                        break;

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc.). Therefore make sure to always read the last bytes
                 * while only doing full 32-bit reads towards the FIFO.
                 */
                if (unlikely(count & 0x3)) {
                        if (count < 4) {
                                unsigned char buf[4];
                                readsl(base + MMCIFIFO, buf, 1);
                                memcpy(ptr, buf, count);
                        } else {
                                readsl(base + MMCIFIFO, ptr, count >> 2);
                                count &= ~0x3;
                        }
                } else {
                        readsl(base + MMCIFIFO, ptr, count >> 2);
                }

                ptr += count;
                remain -= count;
                host_remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_RXDATAAVLBL);

        return ptr - buffer;
}
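/*
 * A note on the FIFO arithmetic in mmci_pio_read() above: MMCIFIFOCNT
 * counts the words of the transfer that have not yet passed through the
 * FIFO, so host_remain - (MMCIFIFOCNT << 2) is the number of bytes that
 * have already arrived from the card and can be read out immediately.
 */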
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        char *ptr = buffer;

        do {
                unsigned int count, maxcnt;

                maxcnt = status & MCI_TXFIFOEMPTY ?
                         variant->fifosize : variant->fifohalfsize;
                count = min(remain, maxcnt);

                /*
                 * On the ST Micro variant, SDIO transfers of less than
                 * 8 bytes should have the clock's hardware flow control
                 * disabled.
                 */
                if (variant->sdio && host->mmc->card &&
                    mmc_card_sdio(host->mmc->card)) {
                        u32 clk;
                        if (count < 8)
                                clk = host->clk_reg & ~variant->clkreg_enable;
                        else
                                clk = host->clk_reg | variant->clkreg_enable;

                        mmci_write_clkreg(host, clk);
                }

                /*
                 * SDIO especially may want to send something that is
                 * not divisible by 4 (as opposed to card sectors
                 * etc.), and the FIFO only accepts full 32-bit writes.
                 * Compensate by adding 3 to the count: a single byte
                 * becomes one 32-bit write, 7 bytes become two 32-bit
                 * writes, etc.
                 */
                writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

                ptr += count;
                remain -= count;

                if (remain == 0)
                        break;

                status = readl(base + MMCISTATUS);
        } while (status & MCI_TXFIFOHALFEMPTY);

        return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        struct sg_mapping_iter *sg_miter = &host->sg_miter;
        struct variant_data *variant = host->variant;
        void __iomem *base = host->base;
        unsigned long flags;
        u32 status;

        status = readl(base + MMCISTATUS);

        dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

        local_irq_save(flags);

        do {
                unsigned int remain, len;
                char *buffer;

                /*
                 * For write, we only need to test the half-empty flag
                 * here - if the FIFO is completely empty, then by
                 * definition it is more than half empty.
                 *
                 * For read, check for data available.
                 */
                if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
                        break;

                if (!sg_miter_next(sg_miter))
                        break;

                buffer = sg_miter->addr;
                remain = sg_miter->length;

                len = 0;
                if (status & MCI_RXACTIVE)
                        len = mmci_pio_read(host, buffer, remain);
                if (status & MCI_TXACTIVE)
                        len = mmci_pio_write(host, buffer, remain, status);

                sg_miter->consumed = len;

                host->size -= len;
                remain -= len;

                if (remain)
                        break;

                status = readl(base + MMCISTATUS);
        } while (1);

        sg_miter_stop(sg_miter);

        local_irq_restore(flags);

        /*
         * If we have less than the fifo 'half-full' threshold to transfer,
         * trigger a PIO interrupt as soon as any data is available.
         */
        if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
                mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

        /*
         * If we run out of data, disable the data IRQs; this
         * prevents a race where the FIFO becomes empty before
         * the chip itself has disabled the data path, and
         * stops us racing with our data end IRQ.
         */
        if (host->size == 0) {
                mmci_set_mask1(host, 0);
                writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
        }

        return IRQ_HANDLED;
}
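/*
 * On variants wired up with a single interrupt line, host->singleirq is
 * set and PIO events are not delivered through a second IRQ; instead
 * mmci_irq() below checks MMCIMASK1 itself and calls mmci_pio_irq() by
 * hand before handling the command/data status bits.
 */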
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;
        u32 status;
        int ret = 0;

        spin_lock(&host->lock);

        do {
                struct mmc_command *cmd;
                struct mmc_data *data;

                status = readl(host->base + MMCISTATUS);

                if (host->singleirq) {
                        if (status & readl(host->base + MMCIMASK1))
                                mmci_pio_irq(irq, dev_id);

                        status &= ~MCI_IRQ1MASK;
                }

                status &= readl(host->base + MMCIMASK0);
                writel(status, host->base + MMCICLEAR);

                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

                data = host->data;
                if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
                              MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
                              MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);

                cmd = host->cmd;
                if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
                        mmci_cmd_irq(host, cmd, status);

                ret = 1;
        } while (status);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmci_host *host = mmc_priv(mmc);
        unsigned long flags;

        WARN_ON(host->mrq != NULL);

        if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
                dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
                        mrq->data->blksz);
                mrq->cmd->error = -EINVAL;
                mmc_request_done(mmc, mrq);
                return;
        }

        pm_runtime_get_sync(mmc_dev(mmc));

        spin_lock_irqsave(&host->lock, flags);

        host->mrq = mrq;

        if (mrq->data)
                mmci_get_next_data(host, mrq->data);

        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);

        mmci_start_command(host, mrq->cmd, 0);

        spin_unlock_irqrestore(&host->lock, flags);
}
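/*
 * Note the ordering in mmci_request() above: for reads the data path is
 * set up before the command is sent, so that the DPSM is ready by the
 * time the card starts returning data. For writes the data path is
 * started from mmci_cmd_irq() once the command has completed.
 */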
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct variant_data *variant = host->variant;
        u32 pwr = 0;
        unsigned long flags;
        int ret;

        pm_runtime_get_sync(mmc_dev(mmc));

        if (host->plat->ios_handler &&
            host->plat->ios_handler(mmc_dev(mmc), ios))
                dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                if (host->vcc)
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
                break;
        case MMC_POWER_UP:
                if (host->vcc) {
                        ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
                        if (ret) {
                                dev_err(mmc_dev(mmc), "unable to set OCR\n");
                                /*
                                 * The .set_ios() function in the mmc_host_ops
                                 * struct returns void, and failing to set the
                                 * power should be rare, so we print an error
                                 * and return here.
                                 */
                                goto out;
                        }
                }
                /*
                 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
                 * and instead uses MCI_PWR_ON, so apply whatever value is
                 * configured in the variant data.
                 */
                pwr |= variant->pwrreg_powerup;

                break;
        case MMC_POWER_ON:
                pwr |= MCI_PWR_ON;
                break;
        }

        if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
                /*
                 * The ST Micro variant has some additional bits
                 * indicating signal direction for the signals in
                 * the SD/MMC bus and feedback-clock usage.
                 */
                pwr |= host->plat->sigdir;

                if (ios->bus_width == MMC_BUS_WIDTH_4)
                        pwr &= ~MCI_ST_DATA74DIREN;
                else if (ios->bus_width == MMC_BUS_WIDTH_1)
                        pwr &= (~MCI_ST_DATA74DIREN &
                                ~MCI_ST_DATA31DIREN &
                                ~MCI_ST_DATA2DIREN);
        }

        if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
                if (host->hw_designer != AMBA_VENDOR_ST)
                        pwr |= MCI_ROD;
                else {
                        /*
                         * The ST Micro variant uses the ROD bit for
                         * something else and only has OD (Open Drain).
                         */
                        pwr |= MCI_OD;
                }
        }

        spin_lock_irqsave(&host->lock, flags);

        mmci_set_clkreg(host, ios->clock);
        mmci_write_pwrreg(host, pwr);

        spin_unlock_irqrestore(&host->lock, flags);

 out:
        pm_runtime_mark_last_busy(mmc_dev(mmc));
        pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);

        if (host->gpio_wp == -ENOSYS)
                return -ENOSYS;

        return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
        struct mmci_host *host = mmc_priv(mmc);
        struct mmci_platform_data *plat = host->plat;
        unsigned int status;

        if (host->gpio_cd == -ENOSYS) {
                if (!plat->status)
                        return 1; /* Assume always present */

                status = plat->status(mmc_dev(host->mmc));
        } else
                status = !!gpio_get_value_cansleep(host->gpio_cd)
                        ^ plat->cd_invert;

        /*
         * Use positive logic throughout - status is zero for no card,
         * non-zero for card inserted.
         */
        return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
        struct mmci_host *host = dev_id;

        mmc_detect_change(host->mmc, msecs_to_jiffies(500));

        return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
        .request        = mmci_request,
        .pre_req        = mmci_pre_request,
        .post_req       = mmci_post_request,
        .set_ios        = mmci_set_ios,
        .get_ro         = mmci_get_ro,
        .get_cd         = mmci_get_cd,
};
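/*
 * mmci_dt_populate_generic_pdata() below consumes a Device Tree node
 * along these lines (illustrative only - the compatible strings, unit
 * address and GPIO phandles are an example, not taken from this file):
 *
 *      mmc@80126000 {
 *              compatible = "arm,pl18x", "arm,primecell";
 *              reg = <0x80126000 0x1000>;
 *              max-frequency = <50000000>;
 *              bus-width = <4>;
 *              cd-gpios = <&gpio2 31 0>;
 *              wp-gpios = <&gpio0 3 0>;
 *              cd-inverted;
 *              mmc-cap-sd-highspeed;
 *      };
 */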
#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
                                           struct mmci_platform_data *pdata)
{
        int bus_width = 0;

        pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
        pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

        if (of_get_property(np, "cd-inverted", NULL))
                pdata->cd_invert = true;
        else
                pdata->cd_invert = false;

        of_property_read_u32(np, "max-frequency", &pdata->f_max);
        if (!pdata->f_max)
                pr_warn("%s has no 'max-frequency' property\n", np->full_name);

        if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
                pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
        if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
                pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

        of_property_read_u32(np, "bus-width", &bus_width);
        switch (bus_width) {
        case 0:
                /* No bus-width supplied. */
                break;
        case 4:
                pdata->capabilities |= MMC_CAP_4_BIT_DATA;
                break;
        case 8:
                pdata->capabilities |= MMC_CAP_8_BIT_DATA;
                break;
        default:
                pr_warn("%s: Unsupported bus width\n", np->full_name);
        }
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
                                           struct mmci_platform_data *pdata)
{
}
#endif

static int __devinit mmci_probe(struct amba_device *dev,
                                const struct amba_id *id)
{
        struct mmci_platform_data *plat = dev->dev.platform_data;
        struct device_node *np = dev->dev.of_node;
        struct variant_data *variant = id->data;
        struct mmci_host *host;
        struct mmc_host *mmc;
        int ret;

        /* Must have platform data or Device Tree. */
        if (!plat && !np) {
                dev_err(&dev->dev, "No plat data or DT found\n");
                return -EINVAL;
        }

        if (!plat) {
                plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
                if (!plat)
                        return -ENOMEM;
        }

        if (np)
                mmci_dt_populate_generic_pdata(np, plat);

        ret = amba_request_regions(dev, DRIVER_NAME);
        if (ret)
                goto out;

        mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto rel_regions;
        }

        host = mmc_priv(mmc);
        host->mmc = mmc;

        host->gpio_wp = -ENOSYS;
        host->gpio_cd = -ENOSYS;
        host->gpio_cd_irq = -1;

        host->hw_designer = amba_manf(dev);
        host->hw_revision = amba_rev(dev);
        dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
        dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

        host->clk = clk_get(&dev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
                goto host_free;
        }

        ret = clk_prepare_enable(host->clk);
        if (ret)
                goto clk_free;

        host->plat = plat;
        host->variant = variant;
        host->mclk = clk_get_rate(host->clk);
        /*
         * According to the spec, mclk is max 100 MHz,
         * so we try to adjust the clock down to this
         * (if possible).
         */
        if (host->mclk > 100000000) {
                ret = clk_set_rate(host->clk, 100000000);
                if (ret < 0)
                        goto clk_disable;
                host->mclk = clk_get_rate(host->clk);
                dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
                        host->mclk);
        }
        host->phybase = dev->res.start;
        host->base = ioremap(dev->res.start, resource_size(&dev->res));
        if (!host->base) {
                ret = -ENOMEM;
                goto clk_disable;
        }

        mmc->ops = &mmci_ops;
        /*
         * The ARM and ST versions of the block have slightly different
         * clock divider equations which means that the minimum divider
         * differs too.
         */
        if (variant->st_clkdiv)
                mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
        else
                mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
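        /*
         * For example, with mclk = 100 MHz the minimum achievable bus
         * clock is DIV_ROUND_UP(100000000, 257) = 389106 Hz on the ST
         * variants (clkdiv capped at 255, f = mclk / (clkdiv + 2)) and
         * DIV_ROUND_UP(100000000, 512) = 195313 Hz on the ARM ones
         * (f = mclk / (2 * (clkdiv + 1))).
         */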
        /*
         * If the platform data supplies a maximum operating
         * frequency, this takes precedence. Else, we fall back
         * to using the module parameter, which has a (low)
         * default value in case it is not specified. Either
         * value must not exceed the clock rate into the block,
         * of course.
         */
        if (plat->f_max)
                mmc->f_max = min(host->mclk, plat->f_max);
        else
                mmc->f_max = min(host->mclk, fmax);
        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
        /* If we're using the regulator framework, try to fetch a regulator */
        host->vcc = regulator_get(&dev->dev, "vmmc");
        if (IS_ERR(host->vcc))
                host->vcc = NULL;
        else {
                int mask = mmc_regulator_get_ocrmask(host->vcc);

                if (mask < 0)
                        dev_err(&dev->dev, "error getting OCR mask (%d)\n",
                                mask);
                else {
                        host->mmc->ocr_avail = (u32) mask;
                        if (plat->ocr_mask)
                                dev_warn(&dev->dev,
                                         "Provided ocr_mask/setpower will not be used "
                                         "(using regulator instead)\n");
                }
        }
#endif
        /* Fall back to platform data if no regulator is found */
        if (host->vcc == NULL)
                mmc->ocr_avail = plat->ocr_mask;
        mmc->caps = plat->capabilities;
        mmc->caps2 = plat->capabilities2;

        /*
         * We can do scatter/gather I/O.
         */
        mmc->max_segs = NR_SG;

        /*
         * Since only a certain number of bits are valid in the data length
         * register, we must ensure that we don't exceed 2^num-1 bytes in a
         * single request.
         */
        mmc->max_req_size = (1 << variant->datalength_bits) - 1;

        /*
         * Set the maximum segment size. Since we aren't doing DMA
         * (yet) we are only limited by the data length register.
         */
        mmc->max_seg_size = mmc->max_req_size;

        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
        mmc->max_blk_size = 1 << 11;

        /*
         * Limit the number of blocks transferred so that we don't overflow
         * the maximum request size.
         */
        mmc->max_blk_count = mmc->max_req_size >> 11;

        spin_lock_init(&host->lock);

        writel(0, host->base + MMCIMASK0);
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);

        if (plat->gpio_cd == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
                goto err_gpio_cd;
        }
        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_cd);
                if (ret == 0)
                        host->gpio_cd = plat->gpio_cd;
                else if (ret != -ENOSYS)
                        goto err_gpio_cd;

                /*
                 * A GPIO pin that detects cards being inserted and removed
                 * will most likely want to trigger on the edges: it reads
                 * 0 when the card is ejected and 1 when it is inserted (or
                 * vice versa in the inverted case), so request triggers on
                 * both edges.
                 */
                ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
                                              mmci_cd_irq,
                                              IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
                                              DRIVER_NAME " (cd)", host);
                if (ret >= 0)
                        host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
        }
        if (plat->gpio_wp == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
                goto err_gpio_wp;
        }
        if (gpio_is_valid(plat->gpio_wp)) {
                ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
                if (ret == 0)
                        ret = gpio_direction_input(plat->gpio_wp);
                if (ret == 0)
                        host->gpio_wp = plat->gpio_wp;
                else if (ret != -ENOSYS)
                        goto err_gpio_wp;
        }

        if ((host->plat->status || host->gpio_cd != -ENOSYS)
            && host->gpio_cd_irq < 0)
                mmc->caps |= MMC_CAP_NEEDS_POLL;

        ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
        if (ret)
                goto unmap;

        if (!dev->irq[1]) {
                host->singleirq = true;
        } else {
                ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
                                  DRIVER_NAME " (pio)", host);
                if (ret)
                        goto irq0_free;
        }

        writel(MCI_IRQENABLE, host->base + MMCIMASK0);

        amba_set_drvdata(dev, mmc);

        dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
                 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
                 amba_rev(dev), (unsigned long long)dev->res.start,
                 dev->irq[0], dev->irq[1]);

        mmci_dma_setup(host);

        pm_runtime_set_autosuspend_delay(&dev->dev, 50);
        pm_runtime_use_autosuspend(&dev->dev);
        pm_runtime_put(&dev->dev);

        mmc_add_host(mmc);

        return 0;

 irq0_free:
        free_irq(dev->irq[0], host);
 unmap:
        if (host->gpio_wp != -ENOSYS)
                gpio_free(host->gpio_wp);
 err_gpio_wp:
        if (host->gpio_cd_irq >= 0)
                free_irq(host->gpio_cd_irq, host);
        if (host->gpio_cd != -ENOSYS)
                gpio_free(host->gpio_cd);
 err_gpio_cd:
        iounmap(host->base);
 clk_disable:
        clk_disable_unprepare(host->clk);
 clk_free:
        clk_put(host->clk);
 host_free:
        mmc_free_host(mmc);
 rel_regions:
        amba_release_regions(dev);
 out:
        return ret;
}
static int __devexit mmci_remove(struct amba_device *dev)
{
        struct mmc_host *mmc = amba_get_drvdata(dev);

        amba_set_drvdata(dev, NULL);

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                /*
                 * Undo pm_runtime_put() in probe. We use the _sync
                 * version here so that we can access the primecell.
                 */
                pm_runtime_get_sync(&dev->dev);

                mmc_remove_host(mmc);

                writel(0, host->base + MMCIMASK0);
                writel(0, host->base + MMCIMASK1);

                writel(0, host->base + MMCICOMMAND);
                writel(0, host->base + MMCIDATACTRL);

                mmci_dma_release(host);
                free_irq(dev->irq[0], host);
                if (!host->singleirq)
                        free_irq(dev->irq[1], host);

                if (host->gpio_wp != -ENOSYS)
                        gpio_free(host->gpio_wp);
                if (host->gpio_cd_irq >= 0)
                        free_irq(host->gpio_cd_irq, host);
                if (host->gpio_cd != -ENOSYS)
                        gpio_free(host->gpio_cd);

                iounmap(host->base);
                clk_disable_unprepare(host->clk);
                clk_put(host->clk);

                if (host->vcc)
                        mmc_regulator_set_ocr(mmc, host->vcc, 0);
                regulator_put(host->vcc);

                mmc_free_host(mmc);

                amba_release_regions(dev);
        }

        return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                ret = mmc_suspend_host(mmc);
                if (ret == 0) {
                        pm_runtime_get_sync(dev);
                        writel(0, host->base + MMCIMASK0);
                }
        }

        return ret;
}

static int mmci_resume(struct device *dev)
{
        struct amba_device *adev = to_amba_device(dev);
        struct mmc_host *mmc = amba_get_drvdata(adev);
        int ret = 0;

        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);

                writel(MCI_IRQENABLE, host->base + MMCIMASK0);
                pm_runtime_put(dev);

                ret = mmc_resume_host(mmc);
        }

        return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

static struct amba_id mmci_ids[] = {
        {
                .id     = 0x00041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm,
        },
        {
                .id     = 0x01041180,
                .mask   = 0xff0fffff,
                .data   = &variant_arm_extended_fifo,
        },
        {
                .id     = 0x00041181,
                .mask   = 0x000fffff,
                .data   = &variant_arm,
        },
        /* ST Micro variants */
        {
                .id     = 0x00180180,
                .mask   = 0x00ffffff,
                .data   = &variant_u300,
        },
        {
                .id     = 0x10180180,
                .mask   = 0xf0ffffff,
                .data   = &variant_nomadik,
        },
        {
                .id     = 0x00280180,
                .mask   = 0x00ffffff,
                .data   = &variant_u300,
        },
        {
                .id     = 0x00480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500,
        },
        {
                .id     = 0x10480180,
                .mask   = 0xf0ffffff,
                .data   = &variant_ux500v2,
        },
        { 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
        .drv            = {
                .name   = DRIVER_NAME,
                .pm     = &mmci_dev_pm_ops,
        },
        .probe          = mmci_probe,
        .remove         = __devexit_p(mmci_remove),
        .id_table       = mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
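/*
 * Document the "fmax" module parameter for modinfo(8); the wording of the
 * description is a suggestion, the default matches the initializer above.
 */
MODULE_PARM_DESC(fmax, "maximum operating frequency in Hz (default 515633)");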