/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the
 *		      datactrl register
 * @pwrreg_powerup: power up value for the MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	mmci_write_clkreg(host, clk);
}
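
/*
 * A worked example of the two divider equations above (illustrative
 * numbers, not from any particular board): with mclk = 100 MHz and a
 * desired rate of 400 kHz, the ST path computes
 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248, so
 * cclk = 100000000 / (248 + 2) = 400 kHz; the PL180 path computes
 * clkdiv = 100000000 / (2 * 400000) - 1 = 124, so
 * cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
 */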

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
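
/*
 * A minimal sketch of matching platform data, assuming a board using a
 * DMA engine driver that supplies a filter function (stedma40_filter is
 * the ste_dma40 one; the channel config names here are hypothetical):
 *
 *	static struct mmci_platform_data mmci0_platform_data = {
 *		.dma_filter	= stedma40_filter,
 *		.dma_rx_param	= &mmci0_dma_cfg_rx,
 *		.dma_tx_param	= &mmci0_dma_cfg_tx, // NULL shares the RX channel
 *	};
 */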

/*
 * This is used in only a couple of places, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}
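
/*
 * Sizing note for the dma_slave_config above: the bursts are expressed
 * in words, so for instance the ux500 variants (fifohalfsize = 8 * 4
 * bytes) end up with src/dst_maxburst = 32 >> 2 = 8 words per burst.
 */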

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
			   __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if configured for DMA */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}
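
/*
 * Rough sketch of the call order from the MMC core (for orientation only,
 * the authoritative flow lives in drivers/mmc/core): while request N is in
 * flight the core may call .pre_req() for request N+1, so its sglist gets
 * DMA-mapped ahead of time; once request N completes the core calls
 * .post_req() for it, which unmaps the buffers. data->host_cookie marks
 * data that has been prepared this way.
 */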

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if configured for DMA */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;
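	/*
	 * Example of the arithmetic above (illustrative numbers): a card
	 * asking for a 100 ms data timeout (timeout_ns = 100000000) with
	 * cclk running at 26 MHz yields clks = 2600000 card clock cycles,
	 * to which timeout_clks is added.
	 */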
	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			u32 clk;

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
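
/*
 * As an illustration of the datactrl encoding above: a 512-byte block
 * read gives blksz_bits = ffs(512) - 1 = 9, so datactrl becomes
 * MCI_DPSM_ENABLE | (9 << 4) | MCI_DPSM_DIRECTION; on blksz_datactrl16
 * variants the raw block size is used instead:
 * MCI_DPSM_ENABLE | (512 << 16) | MCI_DPSM_DIRECTION.
 */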

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
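
/*
 * Example of the odd-count handling above: with 6 bytes left, the first
 * pass reads 6 >> 2 = 1 full word and clamps count to 4; on a later pass
 * the remaining 2 bytes fall into the count < 4 case and are read through
 * the 4-byte bounce buffer, so the FIFO still only sees 32-bit accesses.
 */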

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 to the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				goto out;
			}
		}
		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

out:
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	int bus_width = 0;

	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;
	else
		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);
	if (!pdata->f_max)
		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
	case 0:
		/* No bus-width supplied. */
		break;
	case 4:
		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
		break;
	case 8:
		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
		break;
	default:
		pr_warn("%s: Unsupported bus width\n", np->full_name);
	}
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
}
#endif
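
/*
 * A hypothetical device tree node consumed by the parser above (register
 * address, interrupt specifier and GPIO phandles are made up for the
 * example):
 *
 *	mmc@80126000 {
 *		compatible = "arm,pl18x", "arm,primecell";
 *		reg = <0x80126000 0x1000>;
 *		max-frequency = <50000000>;
 *		bus-width = <4>;
 *		mmc-cap-sd-highspeed;
 *		cd-gpios = <&gpio6 26 0>;
 *		cd-inverted;
 *		wp-gpios = <&gpio4 11 0>;
 *	};
 */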

static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
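	/*
	 * For example (illustrative): with mclk at 100 MHz this yields
	 * f_min = DIV_ROUND_UP(100000000, 257) = 389106 Hz on the ST
	 * variants and DIV_ROUND_UP(100000000, 512) = 195313 Hz otherwise.
	 */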
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto clk_disable;
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);

	/* enable pins to be muxed in and configured */
	if (!IS_ERR(host->pins_default)) {
		ret = pinctrl_select_state(host->pinctrl, host->pins_default);
		if (ret)
			dev_warn(&dev->dev, "could not set default pins\n");
	} else
		dev_warn(&dev->dev, "could not get default pinstate\n");

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
					 "Provided ocr_mask/setpower will not be used "
					 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/*
	 * We can do scatter/gather I/O.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;
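	/*
	 * For example (illustrative): with datalength_bits = 24 this gives
	 * max_req_size = (1 << 24) - 1 = 16777215 bytes, max_blk_size = 2048
	 * and max_blk_count = 16777215 >> 11 = 8191 blocks.
	 */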
	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_wp;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

irq0_free:
	free_irq(dev->irq[0], host);
unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
err_gpio_cd:
	iounmap(host->base);
clk_disable:
	clk_disable_unprepare(host->clk);
clk_free:
	clk_put(host->clk);
host_free:
	mmc_free_host(mmc);
rel_regions:
	amba_release_regions(dev);
out:
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");