1 /* 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 3 * 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 5 * Copyright (C) 2010 ST-Ericsson SA 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/module.h> 12 #include <linux/moduleparam.h> 13 #include <linux/init.h> 14 #include <linux/ioport.h> 15 #include <linux/device.h> 16 #include <linux/interrupt.h> 17 #include <linux/kernel.h> 18 #include <linux/slab.h> 19 #include <linux/delay.h> 20 #include <linux/err.h> 21 #include <linux/highmem.h> 22 #include <linux/log2.h> 23 #include <linux/mmc/pm.h> 24 #include <linux/mmc/host.h> 25 #include <linux/mmc/card.h> 26 #include <linux/amba/bus.h> 27 #include <linux/clk.h> 28 #include <linux/scatterlist.h> 29 #include <linux/gpio.h> 30 #include <linux/of_gpio.h> 31 #include <linux/regulator/consumer.h> 32 #include <linux/dmaengine.h> 33 #include <linux/dma-mapping.h> 34 #include <linux/amba/mmci.h> 35 #include <linux/pm_runtime.h> 36 #include <linux/types.h> 37 #include <linux/pinctrl/consumer.h> 38 39 #include <asm/div64.h> 40 #include <asm/io.h> 41 #include <asm/sizes.h> 42 43 #include "mmci.h" 44 45 #define DRIVER_NAME "mmci-pl18x" 46 47 static unsigned int fmax = 515633; 48 49 /** 50 * struct variant_data - MMCI variant-specific quirks 51 * @clkreg: default value for MCICLOCK register 52 * @clkreg_enable: enable value for MMCICLOCK register 53 * @datalength_bits: number of bits in the MMCIDATALENGTH register 54 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY 55 * is asserted (likewise for RX) 56 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY 57 * is asserted (likewise for RX) 58 * @sdio: variant supports SDIO 59 * @st_clkdiv: true if using a ST-specific clock divider algorithm 60 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register 61 * @pwrreg_powerup: power up value for MMCIPOWER register 62 * @signal_direction: input/out direction of bus signals can be indicated 63 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock 64 */ 65 struct variant_data { 66 unsigned int clkreg; 67 unsigned int clkreg_enable; 68 unsigned int datalength_bits; 69 unsigned int fifosize; 70 unsigned int fifohalfsize; 71 bool sdio; 72 bool st_clkdiv; 73 bool blksz_datactrl16; 74 u32 pwrreg_powerup; 75 bool signal_direction; 76 bool pwrreg_clkgate; 77 }; 78 79 static struct variant_data variant_arm = { 80 .fifosize = 16 * 4, 81 .fifohalfsize = 8 * 4, 82 .datalength_bits = 16, 83 .pwrreg_powerup = MCI_PWR_UP, 84 }; 85 86 static struct variant_data variant_arm_extended_fifo = { 87 .fifosize = 128 * 4, 88 .fifohalfsize = 64 * 4, 89 .datalength_bits = 16, 90 .pwrreg_powerup = MCI_PWR_UP, 91 }; 92 93 static struct variant_data variant_arm_extended_fifo_hwfc = { 94 .fifosize = 128 * 4, 95 .fifohalfsize = 64 * 4, 96 .clkreg_enable = MCI_ARM_HWFCEN, 97 .datalength_bits = 16, 98 .pwrreg_powerup = MCI_PWR_UP, 99 }; 100 101 static struct variant_data variant_u300 = { 102 .fifosize = 16 * 4, 103 .fifohalfsize = 8 * 4, 104 .clkreg_enable = MCI_ST_U300_HWFCEN, 105 .datalength_bits = 16, 106 .sdio = true, 107 .pwrreg_powerup = MCI_PWR_ON, 108 .signal_direction = true, 109 .pwrreg_clkgate = true, 110 }; 111 112 static struct variant_data variant_nomadik = { 113 .fifosize = 16 * 4, 114 .fifohalfsize = 8 * 4, 115 .clkreg = 
MCI_CLK_ENABLE, 116 .datalength_bits = 24, 117 .sdio = true, 118 .st_clkdiv = true, 119 .pwrreg_powerup = MCI_PWR_ON, 120 .signal_direction = true, 121 .pwrreg_clkgate = true, 122 }; 123 124 static struct variant_data variant_ux500 = { 125 .fifosize = 30 * 4, 126 .fifohalfsize = 8 * 4, 127 .clkreg = MCI_CLK_ENABLE, 128 .clkreg_enable = MCI_ST_UX500_HWFCEN, 129 .datalength_bits = 24, 130 .sdio = true, 131 .st_clkdiv = true, 132 .pwrreg_powerup = MCI_PWR_ON, 133 .signal_direction = true, 134 .pwrreg_clkgate = true, 135 }; 136 137 static struct variant_data variant_ux500v2 = { 138 .fifosize = 30 * 4, 139 .fifohalfsize = 8 * 4, 140 .clkreg = MCI_CLK_ENABLE, 141 .clkreg_enable = MCI_ST_UX500_HWFCEN, 142 .datalength_bits = 24, 143 .sdio = true, 144 .st_clkdiv = true, 145 .blksz_datactrl16 = true, 146 .pwrreg_powerup = MCI_PWR_ON, 147 .signal_direction = true, 148 .pwrreg_clkgate = true, 149 }; 150 151 /* 152 * Validate mmc prerequisites 153 */ 154 static int mmci_validate_data(struct mmci_host *host, 155 struct mmc_data *data) 156 { 157 if (!data) 158 return 0; 159 160 if (!is_power_of_2(data->blksz)) { 161 dev_err(mmc_dev(host->mmc), 162 "unsupported block size (%d bytes)\n", data->blksz); 163 return -EINVAL; 164 } 165 166 return 0; 167 } 168 169 /* 170 * This must be called with host->lock held 171 */ 172 static void mmci_write_clkreg(struct mmci_host *host, u32 clk) 173 { 174 if (host->clk_reg != clk) { 175 host->clk_reg = clk; 176 writel(clk, host->base + MMCICLOCK); 177 } 178 } 179 180 /* 181 * This must be called with host->lock held 182 */ 183 static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) 184 { 185 if (host->pwr_reg != pwr) { 186 host->pwr_reg = pwr; 187 writel(pwr, host->base + MMCIPOWER); 188 } 189 } 190 191 /* 192 * This must be called with host->lock held 193 */ 194 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) 195 { 196 struct variant_data *variant = host->variant; 197 u32 clk = variant->clkreg; 198 199 if (desired) { 200 if (desired >= host->mclk) { 201 clk = MCI_CLK_BYPASS; 202 if (variant->st_clkdiv) 203 clk |= MCI_ST_UX500_NEG_EDGE; 204 host->cclk = host->mclk; 205 } else if (variant->st_clkdiv) { 206 /* 207 * DB8500 TRM says f = mclk / (clkdiv + 2) 208 * => clkdiv = (mclk / f) - 2 209 * Round the divider up so we don't exceed the max 210 * frequency 211 */ 212 clk = DIV_ROUND_UP(host->mclk, desired) - 2; 213 if (clk >= 256) 214 clk = 255; 215 host->cclk = host->mclk / (clk + 2); 216 } else { 217 /* 218 * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) 219 * => clkdiv = mclk / (2 * f) - 1 220 */ 221 clk = host->mclk / (2 * desired) - 1; 222 if (clk >= 256) 223 clk = 255; 224 host->cclk = host->mclk / (2 * (clk + 1)); 225 } 226 227 clk |= variant->clkreg_enable; 228 clk |= MCI_CLK_ENABLE; 229 /* This hasn't proven to be worthwhile */ 230 /* clk |= MCI_CLK_PWRSAVE; */ 231 } 232 233 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) 234 clk |= MCI_4BIT_BUS; 235 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) 236 clk |= MCI_ST_8BIT_BUS; 237 238 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) 239 clk |= MCI_ST_UX500_NEG_EDGE; 240 241 mmci_write_clkreg(host, clk); 242 } 243 244 static void 245 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) 246 { 247 writel(0, host->base + MMCICOMMAND); 248 249 BUG_ON(host->data); 250 251 host->mrq = NULL; 252 host->cmd = NULL; 253 254 mmc_request_done(host->mmc, mrq); 255 256 pm_runtime_mark_last_busy(mmc_dev(host->mmc)); 257 pm_runtime_put_autosuspend(mmc_dev(host->mmc)); 258 } 259 260 
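/*
 * Program the MMCIMASK1 interrupt mask. On variants wired up with a
 * single combined interrupt line (host->singleirq), the same bits are
 * also mirrored into the MCI_IRQ1MASK field of MMCIMASK0 so that the
 * PIO interrupts reach the one handler we have.
 */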
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) 261 { 262 void __iomem *base = host->base; 263 264 if (host->singleirq) { 265 unsigned int mask0 = readl(base + MMCIMASK0); 266 267 mask0 &= ~MCI_IRQ1MASK; 268 mask0 |= mask; 269 270 writel(mask0, base + MMCIMASK0); 271 } 272 273 writel(mask, base + MMCIMASK1); 274 } 275 276 static void mmci_stop_data(struct mmci_host *host) 277 { 278 writel(0, host->base + MMCIDATACTRL); 279 mmci_set_mask1(host, 0); 280 host->data = NULL; 281 } 282 283 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) 284 { 285 unsigned int flags = SG_MITER_ATOMIC; 286 287 if (data->flags & MMC_DATA_READ) 288 flags |= SG_MITER_TO_SG; 289 else 290 flags |= SG_MITER_FROM_SG; 291 292 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 293 } 294 295 /* 296 * All the DMA operation mode stuff goes inside this ifdef. 297 * This assumes that you have a generic DMA device interface; 298 * no custom DMA interfaces are supported. 299 */ 300 #ifdef CONFIG_DMA_ENGINE 301 static void mmci_dma_setup(struct mmci_host *host) 302 { 303 struct mmci_platform_data *plat = host->plat; 304 const char *rxname, *txname; 305 dma_cap_mask_t mask; 306 307 if (!plat || !plat->dma_filter) { 308 dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); 309 return; 310 } 311 312 /* initialize pre request cookie */ 313 host->next_data.cookie = 1; 314 315 /* Try to acquire a generic DMA engine slave channel */ 316 dma_cap_zero(mask); 317 dma_cap_set(DMA_SLAVE, mask); 318 319 /* 320 * If only an RX channel is specified, the driver will 321 * attempt to use it bidirectionally, however if it is 322 * specified but cannot be located, DMA will be disabled. 323 */ 324 if (plat->dma_rx_param) { 325 host->dma_rx_channel = dma_request_channel(mask, 326 plat->dma_filter, 327 plat->dma_rx_param); 328 /* E.g. if no DMA hardware is present */ 329 if (!host->dma_rx_channel) 330 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); 331 } 332 333 if (plat->dma_tx_param) { 334 host->dma_tx_channel = dma_request_channel(mask, 335 plat->dma_filter, 336 plat->dma_tx_param); 337 if (!host->dma_tx_channel) 338 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); 339 } else { 340 host->dma_tx_channel = host->dma_rx_channel; 341 } 342 343 if (host->dma_rx_channel) 344 rxname = dma_chan_name(host->dma_rx_channel); 345 else 346 rxname = "none"; 347 348 if (host->dma_tx_channel) 349 txname = dma_chan_name(host->dma_tx_channel); 350 else 351 txname = "none"; 352 353 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", 354 rxname, txname); 355 356 /* 357 * Limit the maximum segment size in any SG entry according to 358 * the parameters of the DMA engine device. 359 */ 360 if (host->dma_tx_channel) { 361 struct device *dev = host->dma_tx_channel->device->dev; 362 unsigned int max_seg_size = dma_get_max_seg_size(dev); 363 364 if (max_seg_size < host->mmc->max_seg_size) 365 host->mmc->max_seg_size = max_seg_size; 366 } 367 if (host->dma_rx_channel) { 368 struct device *dev = host->dma_rx_channel->device->dev; 369 unsigned int max_seg_size = dma_get_max_seg_size(dev); 370 371 if (max_seg_size < host->mmc->max_seg_size) 372 host->mmc->max_seg_size = max_seg_size; 373 } 374 } 375 376 /* 377 * This is only called from DMA error handling and driver removal, 378 * so inline it so it can be discarded.
379 */ 380 static inline void mmci_dma_release(struct mmci_host *host) 381 { 382 struct mmci_platform_data *plat = host->plat; 383 384 if (host->dma_rx_channel) 385 dma_release_channel(host->dma_rx_channel); 386 if (host->dma_tx_channel && plat->dma_tx_param) 387 dma_release_channel(host->dma_tx_channel); 388 host->dma_rx_channel = host->dma_tx_channel = NULL; 389 } 390 391 static void mmci_dma_data_error(struct mmci_host *host) 392 { 393 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 394 dmaengine_terminate_all(host->dma_current); 395 host->dma_current = NULL; 396 host->dma_desc_current = NULL; 397 host->data->host_cookie = 0; 398 } 399 400 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 401 { 402 struct dma_chan *chan; 403 enum dma_data_direction dir; 404 405 if (data->flags & MMC_DATA_READ) { 406 dir = DMA_FROM_DEVICE; 407 chan = host->dma_rx_channel; 408 } else { 409 dir = DMA_TO_DEVICE; 410 chan = host->dma_tx_channel; 411 } 412 413 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 414 } 415 416 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) 417 { 418 u32 status; 419 int i; 420 421 /* Wait up to 1ms for the DMA to complete */ 422 for (i = 0; ; i++) { 423 status = readl(host->base + MMCISTATUS); 424 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 425 break; 426 udelay(10); 427 } 428 429 /* 430 * Check to see whether we still have some data left in the FIFO - 431 * this catches DMA controllers which are unable to monitor the 432 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- 433 * contiguous buffers. On TX, we'll get a FIFO underrun error. 434 */ 435 if (status & MCI_RXDATAAVLBLMASK) { 436 mmci_dma_data_error(host); 437 if (!data->error) 438 data->error = -EIO; 439 } 440 441 if (!data->host_cookie) 442 mmci_dma_unmap(host, data); 443 444 /* 445 * Use of DMA with scatter-gather is impossible. 446 * Give up with DMA and switch back to PIO mode. 447 */ 448 if (status & MCI_RXDATAAVLBLMASK) { 449 dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); 450 mmci_dma_release(host); 451 } 452 453 host->dma_current = NULL; 454 host->dma_desc_current = NULL; 455 } 456 457 /* prepares DMA channel and DMA descriptor, returns non-zero on failure */ 458 static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, 459 struct dma_chan **dma_chan, 460 struct dma_async_tx_descriptor **dma_desc) 461 { 462 struct variant_data *variant = host->variant; 463 struct dma_slave_config conf = { 464 .src_addr = host->phybase + MMCIFIFO, 465 .dst_addr = host->phybase + MMCIFIFO, 466 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 467 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 468 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 469 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 470 .device_fc = false, 471 }; 472 struct dma_chan *chan; 473 struct dma_device *device; 474 struct dma_async_tx_descriptor *desc; 475 enum dma_data_direction buffer_dirn; 476 int nr_sg; 477 478 if (data->flags & MMC_DATA_READ) { 479 conf.direction = DMA_DEV_TO_MEM; 480 buffer_dirn = DMA_FROM_DEVICE; 481 chan = host->dma_rx_channel; 482 } else { 483 conf.direction = DMA_MEM_TO_DEV; 484 buffer_dirn = DMA_TO_DEVICE; 485 chan = host->dma_tx_channel; 486 } 487 488 /* If there's no DMA channel, fall back to PIO */ 489 if (!chan) 490 return -EINVAL; 491 492 /* If less than or equal to the fifo size, don't bother with DMA */ 493 if (data->blksz * data->blocks <= variant->fifosize) 494 return -EINVAL; 495 496 device = chan->device; 497 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 498 if (nr_sg == 0) 499 return -EINVAL; 500 501 dmaengine_slave_config(chan, &conf); 502 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, 503 conf.direction, DMA_CTRL_ACK); 504 if (!desc) 505 goto unmap_exit; 506 507 *dma_chan = chan; 508 *dma_desc = desc; 509 510 return 0; 511 512 unmap_exit: 513 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 514 return -ENOMEM; 515 } 516 517 static inline int mmci_dma_prep_data(struct mmci_host *host, 518 struct mmc_data *data) 519 { 520 /* Check if next job is already prepared. */ 521 if (host->dma_current && host->dma_desc_current) 522 return 0; 523 524 /* No job were prepared thus do it now. */ 525 return __mmci_dma_prep_data(host, data, &host->dma_current, 526 &host->dma_desc_current); 527 } 528 529 static inline int mmci_dma_prep_next(struct mmci_host *host, 530 struct mmc_data *data) 531 { 532 struct mmci_host_next *nd = &host->next_data; 533 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); 534 } 535 536 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 537 { 538 int ret; 539 struct mmc_data *data = host->data; 540 541 ret = mmci_dma_prep_data(host, host->data); 542 if (ret) 543 return ret; 544 545 /* Okay, go for it. */ 546 dev_vdbg(mmc_dev(host->mmc), 547 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 548 data->sg_len, data->blksz, data->blocks, data->flags); 549 dmaengine_submit(host->dma_desc_current); 550 dma_async_issue_pending(host->dma_current); 551 552 datactrl |= MCI_DPSM_DMAENABLE; 553 554 /* Trigger the DMA transfer */ 555 writel(datactrl, host->base + MMCIDATACTRL); 556 557 /* 558 * Let the MMCI say when the data is ended and it's time 559 * to fire next DMA request. 
When that happens, MMCI will 560 * call mmci_data_end() 561 */ 562 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 563 host->base + MMCIMASK0); 564 return 0; 565 } 566 567 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 568 { 569 struct mmci_host_next *next = &host->next_data; 570 571 WARN_ON(data->host_cookie && data->host_cookie != next->cookie); 572 WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan)); 573 574 host->dma_desc_current = next->dma_desc; 575 host->dma_current = next->dma_chan; 576 next->dma_desc = NULL; 577 next->dma_chan = NULL; 578 } 579 580 static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, 581 bool is_first_req) 582 { 583 struct mmci_host *host = mmc_priv(mmc); 584 struct mmc_data *data = mrq->data; 585 struct mmci_host_next *nd = &host->next_data; 586 587 if (!data) 588 return; 589 590 BUG_ON(data->host_cookie); 591 592 if (mmci_validate_data(host, data)) 593 return; 594 595 if (!mmci_dma_prep_next(host, data)) 596 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; 597 } 598 599 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, 600 int err) 601 { 602 struct mmci_host *host = mmc_priv(mmc); 603 struct mmc_data *data = mrq->data; 604 605 if (!data || !data->host_cookie) 606 return; 607 608 mmci_dma_unmap(host, data); 609 610 if (err) { 611 struct mmci_host_next *next = &host->next_data; 612 struct dma_chan *chan; 613 if (data->flags & MMC_DATA_READ) 614 chan = host->dma_rx_channel; 615 else 616 chan = host->dma_tx_channel; 617 dmaengine_terminate_all(chan); 618 619 next->dma_desc = NULL; 620 next->dma_chan = NULL; 621 } 622 } 623 624 #else 625 /* Blank functions if the DMA engine is not available */ 626 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 627 { 628 } 629 static inline void mmci_dma_setup(struct mmci_host *host) 630 { 631 } 632 633 static inline void mmci_dma_release(struct mmci_host *host) 634 { 635 } 636 637 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 638 { 639 } 640 641 static inline void mmci_dma_finalize(struct mmci_host *host, 642 struct mmc_data *data) 643 { 644 } 645 646 static inline void mmci_dma_data_error(struct mmci_host *host) 647 { 648 } 649 650 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 651 { 652 return -ENOSYS; 653 } 654 655 #define mmci_pre_request NULL 656 #define mmci_post_request NULL 657 658 #endif 659 660 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 661 { 662 struct variant_data *variant = host->variant; 663 unsigned int datactrl, timeout, irqmask; 664 unsigned long long clks; 665 void __iomem *base; 666 int blksz_bits; 667 668 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 669 data->blksz, data->blocks, data->flags); 670 671 host->data = data; 672 host->size = data->blksz * data->blocks; 673 data->bytes_xfered = 0; 674 675 clks = (unsigned long long)data->timeout_ns * host->cclk; 676 do_div(clks, 1000000000UL); 677 678 timeout = data->timeout_clks + (unsigned int)clks; 679 680 base = host->base; 681 writel(timeout, base + MMCIDATATIMER); 682 writel(host->size, base + MMCIDATALENGTH); 683 684 blksz_bits = ffs(data->blksz) - 1; 685 BUG_ON(1 << blksz_bits != data->blksz); 686 687 if (variant->blksz_datactrl16) 688 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); 689 else 690 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 691 692 if (data->flags & MMC_DATA_READ) 693 datactrl |= 
MCI_DPSM_DIRECTION; 694 695 /* The ST Micro variants have a special bit to enable SDIO */ 696 if (variant->sdio && host->mmc->card) 697 if (mmc_card_sdio(host->mmc->card)) { 698 /* 699 * The clock register may also need adjusting for 700 * small SDIO writes, see below. 701 */ 702 u32 clk; 703 704 datactrl |= MCI_ST_DPSM_SDIOEN; 705 706 /* 707 * The ST Micro variant for SDIO small write transfers 708 * needs to have clock H/W flow control disabled, 709 * otherwise the transfer will not start. The threshold 710 * depends on the rate of MCLK. 711 */ 712 if (data->flags & MMC_DATA_WRITE && 713 (host->size < 8 || 714 (host->size <= 8 && host->mclk > 50000000))) 715 clk = host->clk_reg & ~variant->clkreg_enable; 716 else 717 clk = host->clk_reg | variant->clkreg_enable; 718 719 mmci_write_clkreg(host, clk); 720 } 721 722 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50) 723 datactrl |= MCI_ST_DPSM_DDRMODE; 724 725 /* 726 * Attempt to use DMA operation mode; if this 727 * fails, fall back to PIO mode 728 */ 729 if (!mmci_dma_start_data(host, datactrl)) 730 return; 731 732 /* IRQ mode, map the SG list for CPU reading/writing */ 733 mmci_init_sg(host, data); 734 735 if (data->flags & MMC_DATA_READ) { 736 irqmask = MCI_RXFIFOHALFFULLMASK; 737 738 /* 739 * If we have less than the fifo 'half-full' threshold to 740 * transfer, trigger a PIO interrupt as soon as any data 741 * is available. 742 */ 743 if (host->size < variant->fifohalfsize) 744 irqmask |= MCI_RXDATAAVLBLMASK; 745 } else { 746 /* 747 * We don't actually need to include "FIFO empty" here 748 * since it's implicit in "FIFO half empty". 749 */ 750 irqmask = MCI_TXFIFOHALFEMPTYMASK; 751 } 752 753 writel(datactrl, base + MMCIDATACTRL); 754 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 755 mmci_set_mask1(host, irqmask); 756 } 757 758 static void 759 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 760 { 761 void __iomem *base = host->base; 762 763 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 764 cmd->opcode, cmd->arg, cmd->flags); 765 766 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 767 writel(0, base + MMCICOMMAND); 768 udelay(1); 769 } 770 771 c |= cmd->opcode | MCI_CPSM_ENABLE; 772 if (cmd->flags & MMC_RSP_PRESENT) { 773 if (cmd->flags & MMC_RSP_136) 774 c |= MCI_CPSM_LONGRSP; 775 c |= MCI_CPSM_RESPONSE; 776 } 777 if (/*interrupt*/0) 778 c |= MCI_CPSM_INTERRUPT; 779 780 host->cmd = cmd; 781 782 writel(cmd->arg, base + MMCIARGUMENT); 783 writel(c, base + MMCICOMMAND); 784 } 785 786 static void 787 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 788 unsigned int status) 789 { 790 /* First check for errors */ 791 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 792 MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 793 u32 remain, success; 794 795 /* Terminate the DMA transfer */ 796 if (dma_inprogress(host)) { 797 mmci_dma_data_error(host); 798 mmci_dma_unmap(host, data); 799 } 800 801 /* 802 * Calculate how far we are into the transfer. Note that 803 * the data counter gives the number of bytes transferred 804 * on the MMC bus, not on the host side. On reads, this 805 * can be as much as a FIFO-worth of data ahead. This 806 * matters for FIFO overruns only.
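 * In the RX overrun case up to a FIFO-worth of those bytes may never
 * have reached memory, which is why a full FIFO size is subtracted
 * from 'success' below before rounding down to a whole block.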
807 */ 808 remain = readl(host->base + MMCIDATACNT); 809 success = data->blksz * data->blocks - remain; 810 811 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 812 status, success); 813 if (status & MCI_DATACRCFAIL) { 814 /* Last block was not successful */ 815 success -= 1; 816 data->error = -EILSEQ; 817 } else if (status & MCI_DATATIMEOUT) { 818 data->error = -ETIMEDOUT; 819 } else if (status & MCI_STARTBITERR) { 820 data->error = -ECOMM; 821 } else if (status & MCI_TXUNDERRUN) { 822 data->error = -EIO; 823 } else if (status & MCI_RXOVERRUN) { 824 if (success > host->variant->fifosize) 825 success -= host->variant->fifosize; 826 else 827 success = 0; 828 data->error = -EIO; 829 } 830 data->bytes_xfered = round_down(success, data->blksz); 831 } 832 833 if (status & MCI_DATABLOCKEND) 834 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 835 836 if (status & MCI_DATAEND || data->error) { 837 if (dma_inprogress(host)) 838 mmci_dma_finalize(host, data); 839 mmci_stop_data(host); 840 841 if (!data->error) 842 /* The error clause is handled above, success! */ 843 data->bytes_xfered = data->blksz * data->blocks; 844 845 if (!data->stop) { 846 mmci_request_end(host, data->mrq); 847 } else { 848 mmci_start_command(host, data->stop, 0); 849 } 850 } 851 } 852 853 static void 854 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 855 unsigned int status) 856 { 857 void __iomem *base = host->base; 858 859 host->cmd = NULL; 860 861 if (status & MCI_CMDTIMEOUT) { 862 cmd->error = -ETIMEDOUT; 863 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 864 cmd->error = -EILSEQ; 865 } else { 866 cmd->resp[0] = readl(base + MMCIRESPONSE0); 867 cmd->resp[1] = readl(base + MMCIRESPONSE1); 868 cmd->resp[2] = readl(base + MMCIRESPONSE2); 869 cmd->resp[3] = readl(base + MMCIRESPONSE3); 870 } 871 872 if (!cmd->data || cmd->error) { 873 if (host->data) { 874 /* Terminate the DMA transfer */ 875 if (dma_inprogress(host)) { 876 mmci_dma_data_error(host); 877 mmci_dma_unmap(host, host->data); 878 } 879 mmci_stop_data(host); 880 } 881 mmci_request_end(host, cmd->mrq); 882 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 883 mmci_start_data(host, cmd->data); 884 } 885 } 886 887 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 888 { 889 void __iomem *base = host->base; 890 char *ptr = buffer; 891 u32 status; 892 int host_remain = host->size; 893 894 do { 895 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 896 897 if (count > remain) 898 count = remain; 899 900 if (count <= 0) 901 break; 902 903 /* 904 * SDIO especially may want to send something that is 905 * not divisible by 4 (as opposed to card sectors 906 * etc). Therefore make sure to always read the last bytes 907 * while only doing full 32-bit reads towards the FIFO. 
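 * A trailing chunk of 1-3 bytes is therefore pulled in through a small
 * bounce buffer with a single 32-bit FIFO access and then copied out,
 * so the FIFO itself is never read with a narrower access.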
908 */ 909 if (unlikely(count & 0x3)) { 910 if (count < 4) { 911 unsigned char buf[4]; 912 ioread32_rep(base + MMCIFIFO, buf, 1); 913 memcpy(ptr, buf, count); 914 } else { 915 ioread32_rep(base + MMCIFIFO, ptr, count >> 2); 916 count &= ~0x3; 917 } 918 } else { 919 ioread32_rep(base + MMCIFIFO, ptr, count >> 2); 920 } 921 922 ptr += count; 923 remain -= count; 924 host_remain -= count; 925 926 if (remain == 0) 927 break; 928 929 status = readl(base + MMCISTATUS); 930 } while (status & MCI_RXDATAAVLBL); 931 932 return ptr - buffer; 933 } 934 935 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 936 { 937 struct variant_data *variant = host->variant; 938 void __iomem *base = host->base; 939 char *ptr = buffer; 940 941 do { 942 unsigned int count, maxcnt; 943 944 maxcnt = status & MCI_TXFIFOEMPTY ? 945 variant->fifosize : variant->fifohalfsize; 946 count = min(remain, maxcnt); 947 948 /* 949 * SDIO especially may want to send something that is 950 * not divisible by 4 (as opposed to card sectors 951 * etc), and the FIFO only accept full 32-bit writes. 952 * So compensate by adding +3 on the count, a single 953 * byte become a 32bit write, 7 bytes will be two 954 * 32bit writes etc. 955 */ 956 iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2); 957 958 ptr += count; 959 remain -= count; 960 961 if (remain == 0) 962 break; 963 964 status = readl(base + MMCISTATUS); 965 } while (status & MCI_TXFIFOHALFEMPTY); 966 967 return ptr - buffer; 968 } 969 970 /* 971 * PIO data transfer IRQ handler. 972 */ 973 static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 974 { 975 struct mmci_host *host = dev_id; 976 struct sg_mapping_iter *sg_miter = &host->sg_miter; 977 struct variant_data *variant = host->variant; 978 void __iomem *base = host->base; 979 unsigned long flags; 980 u32 status; 981 982 status = readl(base + MMCISTATUS); 983 984 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 985 986 local_irq_save(flags); 987 988 do { 989 unsigned int remain, len; 990 char *buffer; 991 992 /* 993 * For write, we only need to test the half-empty flag 994 * here - if the FIFO is completely empty, then by 995 * definition it is more than half empty. 996 * 997 * For read, check for data available. 998 */ 999 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 1000 break; 1001 1002 if (!sg_miter_next(sg_miter)) 1003 break; 1004 1005 buffer = sg_miter->addr; 1006 remain = sg_miter->length; 1007 1008 len = 0; 1009 if (status & MCI_RXACTIVE) 1010 len = mmci_pio_read(host, buffer, remain); 1011 if (status & MCI_TXACTIVE) 1012 len = mmci_pio_write(host, buffer, remain, status); 1013 1014 sg_miter->consumed = len; 1015 1016 host->size -= len; 1017 remain -= len; 1018 1019 if (remain) 1020 break; 1021 1022 status = readl(base + MMCISTATUS); 1023 } while (1); 1024 1025 sg_miter_stop(sg_miter); 1026 1027 local_irq_restore(flags); 1028 1029 /* 1030 * If we have less than the fifo 'half-full' threshold to transfer, 1031 * trigger a PIO interrupt as soon as any data is available. 1032 */ 1033 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 1034 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 1035 1036 /* 1037 * If we run out of data, disable the data IRQs; this 1038 * prevents a race where the FIFO becomes empty before 1039 * the chip itself has disabled the data path, and 1040 * stops us racing with our data end IRQ. 
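 * Re-enabling MCI_DATAENDMASK in MMCIMASK0 below hands transfer
 * completion back to the main mmci_irq() handler once PIO has drained
 * everything.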
1041 */ 1042 if (host->size == 0) { 1043 mmci_set_mask1(host, 0); 1044 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 1045 } 1046 1047 return IRQ_HANDLED; 1048 } 1049 1050 /* 1051 * Handle completion of command and data transfers. 1052 */ 1053 static irqreturn_t mmci_irq(int irq, void *dev_id) 1054 { 1055 struct mmci_host *host = dev_id; 1056 u32 status; 1057 int ret = 0; 1058 1059 spin_lock(&host->lock); 1060 1061 do { 1062 struct mmc_command *cmd; 1063 struct mmc_data *data; 1064 1065 status = readl(host->base + MMCISTATUS); 1066 1067 if (host->singleirq) { 1068 if (status & readl(host->base + MMCIMASK1)) 1069 mmci_pio_irq(irq, dev_id); 1070 1071 status &= ~MCI_IRQ1MASK; 1072 } 1073 1074 status &= readl(host->base + MMCIMASK0); 1075 writel(status, host->base + MMCICLEAR); 1076 1077 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 1078 1079 data = host->data; 1080 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 1081 MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| 1082 MCI_DATABLOCKEND) && data) 1083 mmci_data_irq(host, data, status); 1084 1085 cmd = host->cmd; 1086 if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) 1087 mmci_cmd_irq(host, cmd, status); 1088 1089 ret = 1; 1090 } while (status); 1091 1092 spin_unlock(&host->lock); 1093 1094 return IRQ_RETVAL(ret); 1095 } 1096 1097 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1098 { 1099 struct mmci_host *host = mmc_priv(mmc); 1100 unsigned long flags; 1101 1102 WARN_ON(host->mrq != NULL); 1103 1104 mrq->cmd->error = mmci_validate_data(host, mrq->data); 1105 if (mrq->cmd->error) { 1106 mmc_request_done(mmc, mrq); 1107 return; 1108 } 1109 1110 pm_runtime_get_sync(mmc_dev(mmc)); 1111 1112 spin_lock_irqsave(&host->lock, flags); 1113 1114 host->mrq = mrq; 1115 1116 if (mrq->data) 1117 mmci_get_next_data(host, mrq->data); 1118 1119 if (mrq->data && mrq->data->flags & MMC_DATA_READ) 1120 mmci_start_data(host, mrq->data); 1121 1122 mmci_start_command(host, mrq->cmd, 0); 1123 1124 spin_unlock_irqrestore(&host->lock, flags); 1125 } 1126 1127 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1128 { 1129 struct mmci_host *host = mmc_priv(mmc); 1130 struct variant_data *variant = host->variant; 1131 u32 pwr = 0; 1132 unsigned long flags; 1133 int ret; 1134 1135 pm_runtime_get_sync(mmc_dev(mmc)); 1136 1137 if (host->plat->ios_handler && 1138 host->plat->ios_handler(mmc_dev(mmc), ios)) 1139 dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); 1140 1141 switch (ios->power_mode) { 1142 case MMC_POWER_OFF: 1143 if (!IS_ERR(mmc->supply.vmmc)) 1144 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1145 1146 if (!IS_ERR(mmc->supply.vqmmc) && 1147 regulator_is_enabled(mmc->supply.vqmmc)) 1148 regulator_disable(mmc->supply.vqmmc); 1149 1150 break; 1151 case MMC_POWER_UP: 1152 if (!IS_ERR(mmc->supply.vmmc)) 1153 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 1154 1155 /* 1156 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP 1157 * and instead uses MCI_PWR_ON so apply whatever value is 1158 * configured in the variant data. 
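 * (MCI_PWR_UP on the ARM variants, MCI_PWR_ON on the ST ones - see the
 * pwrreg_powerup fields in the variant tables above.)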
1159 */ 1160 pwr |= variant->pwrreg_powerup; 1161 1162 break; 1163 case MMC_POWER_ON: 1164 if (!IS_ERR(mmc->supply.vqmmc) && 1165 !regulator_is_enabled(mmc->supply.vqmmc)) { 1166 ret = regulator_enable(mmc->supply.vqmmc); 1167 if (ret < 0) 1168 dev_err(mmc_dev(mmc), 1169 "failed to enable vqmmc regulator\n"); 1170 } 1171 1172 pwr |= MCI_PWR_ON; 1173 break; 1174 } 1175 1176 if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { 1177 /* 1178 * The ST Micro variant has some additional bits 1179 * indicating signal direction for the signals in 1180 * the SD/MMC bus and feedback-clock usage. 1181 */ 1182 pwr |= host->plat->sigdir; 1183 1184 if (ios->bus_width == MMC_BUS_WIDTH_4) 1185 pwr &= ~MCI_ST_DATA74DIREN; 1186 else if (ios->bus_width == MMC_BUS_WIDTH_1) 1187 pwr &= (~MCI_ST_DATA74DIREN & 1188 ~MCI_ST_DATA31DIREN & 1189 ~MCI_ST_DATA2DIREN); 1190 } 1191 1192 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 1193 if (host->hw_designer != AMBA_VENDOR_ST) 1194 pwr |= MCI_ROD; 1195 else { 1196 /* 1197 * The ST Micro variant use the ROD bit for something 1198 * else and only has OD (Open Drain). 1199 */ 1200 pwr |= MCI_OD; 1201 } 1202 } 1203 1204 /* 1205 * If clock = 0 and the variant requires the MMCIPOWER to be used for 1206 * gating the clock, the MCI_PWR_ON bit is cleared. 1207 */ 1208 if (!ios->clock && variant->pwrreg_clkgate) 1209 pwr &= ~MCI_PWR_ON; 1210 1211 spin_lock_irqsave(&host->lock, flags); 1212 1213 mmci_set_clkreg(host, ios->clock); 1214 mmci_write_pwrreg(host, pwr); 1215 1216 spin_unlock_irqrestore(&host->lock, flags); 1217 1218 pm_runtime_mark_last_busy(mmc_dev(mmc)); 1219 pm_runtime_put_autosuspend(mmc_dev(mmc)); 1220 } 1221 1222 static int mmci_get_ro(struct mmc_host *mmc) 1223 { 1224 struct mmci_host *host = mmc_priv(mmc); 1225 1226 if (host->gpio_wp == -ENOSYS) 1227 return -ENOSYS; 1228 1229 return gpio_get_value_cansleep(host->gpio_wp); 1230 } 1231 1232 static int mmci_get_cd(struct mmc_host *mmc) 1233 { 1234 struct mmci_host *host = mmc_priv(mmc); 1235 struct mmci_platform_data *plat = host->plat; 1236 unsigned int status; 1237 1238 if (host->gpio_cd == -ENOSYS) { 1239 if (!plat->status) 1240 return 1; /* Assume always present */ 1241 1242 status = plat->status(mmc_dev(host->mmc)); 1243 } else 1244 status = !!gpio_get_value_cansleep(host->gpio_cd) 1245 ^ plat->cd_invert; 1246 1247 /* 1248 * Use positive logic throughout - status is zero for no card, 1249 * non-zero for card inserted. 
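 * Whether the raw GPIO level needs inverting is a board property,
 * hence the plat->cd_invert XOR above.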
1250 */ 1251 return status; 1252 } 1253 1254 static irqreturn_t mmci_cd_irq(int irq, void *dev_id) 1255 { 1256 struct mmci_host *host = dev_id; 1257 1258 mmc_detect_change(host->mmc, msecs_to_jiffies(500)); 1259 1260 return IRQ_HANDLED; 1261 } 1262 1263 static const struct mmc_host_ops mmci_ops = { 1264 .request = mmci_request, 1265 .pre_req = mmci_pre_request, 1266 .post_req = mmci_post_request, 1267 .set_ios = mmci_set_ios, 1268 .get_ro = mmci_get_ro, 1269 .get_cd = mmci_get_cd, 1270 }; 1271 1272 #ifdef CONFIG_OF 1273 static void mmci_dt_populate_generic_pdata(struct device_node *np, 1274 struct mmci_platform_data *pdata) 1275 { 1276 int bus_width = 0; 1277 1278 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); 1279 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); 1280 1281 if (of_get_property(np, "cd-inverted", NULL)) 1282 pdata->cd_invert = true; 1283 else 1284 pdata->cd_invert = false; 1285 1286 of_property_read_u32(np, "max-frequency", &pdata->f_max); 1287 if (!pdata->f_max) 1288 pr_warn("%s has no 'max-frequency' property\n", np->full_name); 1289 1290 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) 1291 pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED; 1292 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) 1293 pdata->capabilities |= MMC_CAP_SD_HIGHSPEED; 1294 1295 of_property_read_u32(np, "bus-width", &bus_width); 1296 switch (bus_width) { 1297 case 0 : 1298 /* No bus-width supplied. */ 1299 break; 1300 case 4 : 1301 pdata->capabilities |= MMC_CAP_4_BIT_DATA; 1302 break; 1303 case 8 : 1304 pdata->capabilities |= MMC_CAP_8_BIT_DATA; 1305 break; 1306 default : 1307 pr_warn("%s: Unsupported bus width\n", np->full_name); 1308 } 1309 } 1310 #else 1311 static void mmci_dt_populate_generic_pdata(struct device_node *np, 1312 struct mmci_platform_data *pdata) 1313 { 1314 return; 1315 } 1316 #endif 1317 1318 static int mmci_probe(struct amba_device *dev, 1319 const struct amba_id *id) 1320 { 1321 struct mmci_platform_data *plat = dev->dev.platform_data; 1322 struct device_node *np = dev->dev.of_node; 1323 struct variant_data *variant = id->data; 1324 struct mmci_host *host; 1325 struct mmc_host *mmc; 1326 int ret; 1327 1328 /* Must have platform data or Device Tree. 
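 * When only a device tree node is present, a zeroed pdata is allocated
 * below and then filled in from the DT properties.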
*/ 1329 if (!plat && !np) { 1330 dev_err(&dev->dev, "No plat data or DT found\n"); 1331 return -EINVAL; 1332 } 1333 1334 if (!plat) { 1335 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); 1336 if (!plat) 1337 return -ENOMEM; 1338 } 1339 1340 if (np) 1341 mmci_dt_populate_generic_pdata(np, plat); 1342 1343 ret = amba_request_regions(dev, DRIVER_NAME); 1344 if (ret) 1345 goto out; 1346 1347 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); 1348 if (!mmc) { 1349 ret = -ENOMEM; 1350 goto rel_regions; 1351 } 1352 1353 host = mmc_priv(mmc); 1354 host->mmc = mmc; 1355 1356 host->gpio_wp = -ENOSYS; 1357 host->gpio_cd = -ENOSYS; 1358 host->gpio_cd_irq = -1; 1359 1360 host->hw_designer = amba_manf(dev); 1361 host->hw_revision = amba_rev(dev); 1362 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); 1363 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); 1364 1365 host->clk = clk_get(&dev->dev, NULL); 1366 if (IS_ERR(host->clk)) { 1367 ret = PTR_ERR(host->clk); 1368 host->clk = NULL; 1369 goto host_free; 1370 } 1371 1372 ret = clk_prepare_enable(host->clk); 1373 if (ret) 1374 goto clk_free; 1375 1376 host->plat = plat; 1377 host->variant = variant; 1378 host->mclk = clk_get_rate(host->clk); 1379 /* 1380 * According to the spec, mclk is max 100 MHz, 1381 * so we try to adjust the clock down to this, 1382 * (if possible). 1383 */ 1384 if (host->mclk > 100000000) { 1385 ret = clk_set_rate(host->clk, 100000000); 1386 if (ret < 0) 1387 goto clk_disable; 1388 host->mclk = clk_get_rate(host->clk); 1389 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1390 host->mclk); 1391 } 1392 host->phybase = dev->res.start; 1393 host->base = ioremap(dev->res.start, resource_size(&dev->res)); 1394 if (!host->base) { 1395 ret = -ENOMEM; 1396 goto clk_disable; 1397 } 1398 1399 mmc->ops = &mmci_ops; 1400 /* 1401 * The ARM and ST versions of the block have slightly different 1402 * clock divider equations which means that the minimum divider 1403 * differs too. 1404 */ 1405 if (variant->st_clkdiv) 1406 mmc->f_min = DIV_ROUND_UP(host->mclk, 257); 1407 else 1408 mmc->f_min = DIV_ROUND_UP(host->mclk, 512); 1409 /* 1410 * If the platform data supplies a maximum operating 1411 * frequency, this takes precedence. Else, we fall back 1412 * to using the module parameter, which has a (low) 1413 * default value in case it is not specified. Either 1414 * value must not exceed the clock rate into the block, 1415 * of course. 
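 * For example, with mclk at 100 MHz and no platform f_max, the default
 * module parameter value (515633 Hz) becomes mmc->f_max.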
1416 */ 1417 if (plat->f_max) 1418 mmc->f_max = min(host->mclk, plat->f_max); 1419 else 1420 mmc->f_max = min(host->mclk, fmax); 1421 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 1422 1423 host->pinctrl = devm_pinctrl_get(&dev->dev); 1424 if (IS_ERR(host->pinctrl)) { 1425 ret = PTR_ERR(host->pinctrl); 1426 goto clk_disable; 1427 } 1428 1429 host->pins_default = pinctrl_lookup_state(host->pinctrl, 1430 PINCTRL_STATE_DEFAULT); 1431 1432 /* enable pins to be muxed in and configured */ 1433 if (!IS_ERR(host->pins_default)) { 1434 ret = pinctrl_select_state(host->pinctrl, host->pins_default); 1435 if (ret) 1436 dev_warn(&dev->dev, "could not set default pins\n"); 1437 } else 1438 dev_warn(&dev->dev, "could not get default pinstate\n"); 1439 1440 /* Get regulators and the supported OCR mask */ 1441 mmc_regulator_get_supply(mmc); 1442 if (!mmc->ocr_avail) 1443 mmc->ocr_avail = plat->ocr_mask; 1444 else if (plat->ocr_mask) 1445 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); 1446 1447 mmc->caps = plat->capabilities; 1448 mmc->caps2 = plat->capabilities2; 1449 1450 /* We support these PM capabilities. */ 1451 mmc->pm_caps = MMC_PM_KEEP_POWER; 1452 1453 /* 1454 * We can do SGIO 1455 */ 1456 mmc->max_segs = NR_SG; 1457 1458 /* 1459 * Since only a certain number of bits are valid in the data length 1460 * register, we must ensure that we don't exceed 2^num-1 bytes in a 1461 * single request. 1462 */ 1463 mmc->max_req_size = (1 << variant->datalength_bits) - 1; 1464 1465 /* 1466 * Set the maximum segment size. Since we aren't doing DMA 1467 * (yet) we are only limited by the data length register. 1468 */ 1469 mmc->max_seg_size = mmc->max_req_size; 1470 1471 /* 1472 * Block size can be up to 2048 bytes, but must be a power of two. 1473 */ 1474 mmc->max_blk_size = 1 << 11; 1475 1476 /* 1477 * Limit the number of blocks transferred so that we don't overflow 1478 * the maximum request size. 1479 */ 1480 mmc->max_blk_count = mmc->max_req_size >> 11; 1481 1482 spin_lock_init(&host->lock); 1483 1484 writel(0, host->base + MMCIMASK0); 1485 writel(0, host->base + MMCIMASK1); 1486 writel(0xfff, host->base + MMCICLEAR); 1487 1488 if (plat->gpio_cd == -EPROBE_DEFER) { 1489 ret = -EPROBE_DEFER; 1490 goto err_gpio_cd; 1491 } 1492 if (gpio_is_valid(plat->gpio_cd)) { 1493 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 1494 if (ret == 0) 1495 ret = gpio_direction_input(plat->gpio_cd); 1496 if (ret == 0) 1497 host->gpio_cd = plat->gpio_cd; 1498 else if (ret != -ENOSYS) 1499 goto err_gpio_cd; 1500 1501 /* 1502 * A gpio pin that will detect cards when inserted and removed 1503 * will most likely want to trigger on the edges if it is 1504 * 0 when ejected and 1 when inserted (or mutatis mutandis 1505 * for the inverted case) so we request triggers on both 1506 * edges. 
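 * request_any_context_irq() is used since the controller behind the
 * card-detect GPIO may only be able to provide a nested/threaded IRQ.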
1507 */ 1508 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), 1509 mmci_cd_irq, 1510 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 1511 DRIVER_NAME " (cd)", host); 1512 if (ret >= 0) 1513 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1514 } 1515 if (plat->gpio_wp == -EPROBE_DEFER) { 1516 ret = -EPROBE_DEFER; 1517 goto err_gpio_wp; 1518 } 1519 if (gpio_is_valid(plat->gpio_wp)) { 1520 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); 1521 if (ret == 0) 1522 ret = gpio_direction_input(plat->gpio_wp); 1523 if (ret == 0) 1524 host->gpio_wp = plat->gpio_wp; 1525 else if (ret != -ENOSYS) 1526 goto err_gpio_wp; 1527 } 1528 1529 if ((host->plat->status || host->gpio_cd != -ENOSYS) 1530 && host->gpio_cd_irq < 0) 1531 mmc->caps |= MMC_CAP_NEEDS_POLL; 1532 1533 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 1534 if (ret) 1535 goto unmap; 1536 1537 if (!dev->irq[1]) 1538 host->singleirq = true; 1539 else { 1540 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, 1541 DRIVER_NAME " (pio)", host); 1542 if (ret) 1543 goto irq0_free; 1544 } 1545 1546 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1547 1548 amba_set_drvdata(dev, mmc); 1549 1550 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", 1551 mmc_hostname(mmc), amba_part(dev), amba_manf(dev), 1552 amba_rev(dev), (unsigned long long)dev->res.start, 1553 dev->irq[0], dev->irq[1]); 1554 1555 mmci_dma_setup(host); 1556 1557 pm_runtime_set_autosuspend_delay(&dev->dev, 50); 1558 pm_runtime_use_autosuspend(&dev->dev); 1559 pm_runtime_put(&dev->dev); 1560 1561 mmc_add_host(mmc); 1562 1563 return 0; 1564 1565 irq0_free: 1566 free_irq(dev->irq[0], host); 1567 unmap: 1568 if (host->gpio_wp != -ENOSYS) 1569 gpio_free(host->gpio_wp); 1570 err_gpio_wp: 1571 if (host->gpio_cd_irq >= 0) 1572 free_irq(host->gpio_cd_irq, host); 1573 if (host->gpio_cd != -ENOSYS) 1574 gpio_free(host->gpio_cd); 1575 err_gpio_cd: 1576 iounmap(host->base); 1577 clk_disable: 1578 clk_disable_unprepare(host->clk); 1579 clk_free: 1580 clk_put(host->clk); 1581 host_free: 1582 mmc_free_host(mmc); 1583 rel_regions: 1584 amba_release_regions(dev); 1585 out: 1586 return ret; 1587 } 1588 1589 static int mmci_remove(struct amba_device *dev) 1590 { 1591 struct mmc_host *mmc = amba_get_drvdata(dev); 1592 1593 amba_set_drvdata(dev, NULL); 1594 1595 if (mmc) { 1596 struct mmci_host *host = mmc_priv(mmc); 1597 1598 /* 1599 * Undo pm_runtime_put() in probe. We use the _sync 1600 * version here so that we can access the primecell. 
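 * i.e. the block is guaranteed to be powered and clocked again before
 * the register writes below.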
1601 */ 1602 pm_runtime_get_sync(&dev->dev); 1603 1604 mmc_remove_host(mmc); 1605 1606 writel(0, host->base + MMCIMASK0); 1607 writel(0, host->base + MMCIMASK1); 1608 1609 writel(0, host->base + MMCICOMMAND); 1610 writel(0, host->base + MMCIDATACTRL); 1611 1612 mmci_dma_release(host); 1613 free_irq(dev->irq[0], host); 1614 if (!host->singleirq) 1615 free_irq(dev->irq[1], host); 1616 1617 if (host->gpio_wp != -ENOSYS) 1618 gpio_free(host->gpio_wp); 1619 if (host->gpio_cd_irq >= 0) 1620 free_irq(host->gpio_cd_irq, host); 1621 if (host->gpio_cd != -ENOSYS) 1622 gpio_free(host->gpio_cd); 1623 1624 iounmap(host->base); 1625 clk_disable_unprepare(host->clk); 1626 clk_put(host->clk); 1627 1628 mmc_free_host(mmc); 1629 1630 amba_release_regions(dev); 1631 } 1632 1633 return 0; 1634 } 1635 1636 #ifdef CONFIG_SUSPEND 1637 static int mmci_suspend(struct device *dev) 1638 { 1639 struct amba_device *adev = to_amba_device(dev); 1640 struct mmc_host *mmc = amba_get_drvdata(adev); 1641 int ret = 0; 1642 1643 if (mmc) { 1644 struct mmci_host *host = mmc_priv(mmc); 1645 1646 ret = mmc_suspend_host(mmc); 1647 if (ret == 0) { 1648 pm_runtime_get_sync(dev); 1649 writel(0, host->base + MMCIMASK0); 1650 } 1651 } 1652 1653 return ret; 1654 } 1655 1656 static int mmci_resume(struct device *dev) 1657 { 1658 struct amba_device *adev = to_amba_device(dev); 1659 struct mmc_host *mmc = amba_get_drvdata(adev); 1660 int ret = 0; 1661 1662 if (mmc) { 1663 struct mmci_host *host = mmc_priv(mmc); 1664 1665 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1666 pm_runtime_put(dev); 1667 1668 ret = mmc_resume_host(mmc); 1669 } 1670 1671 return ret; 1672 } 1673 #endif 1674 1675 #ifdef CONFIG_PM_RUNTIME 1676 static int mmci_runtime_suspend(struct device *dev) 1677 { 1678 struct amba_device *adev = to_amba_device(dev); 1679 struct mmc_host *mmc = amba_get_drvdata(adev); 1680 1681 if (mmc) { 1682 struct mmci_host *host = mmc_priv(mmc); 1683 clk_disable_unprepare(host->clk); 1684 } 1685 1686 return 0; 1687 } 1688 1689 static int mmci_runtime_resume(struct device *dev) 1690 { 1691 struct amba_device *adev = to_amba_device(dev); 1692 struct mmc_host *mmc = amba_get_drvdata(adev); 1693 1694 if (mmc) { 1695 struct mmci_host *host = mmc_priv(mmc); 1696 clk_prepare_enable(host->clk); 1697 } 1698 1699 return 0; 1700 } 1701 #endif 1702 1703 static const struct dev_pm_ops mmci_dev_pm_ops = { 1704 SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume) 1705 SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) 1706 }; 1707 1708 static struct amba_id mmci_ids[] = { 1709 { 1710 .id = 0x00041180, 1711 .mask = 0xff0fffff, 1712 .data = &variant_arm, 1713 }, 1714 { 1715 .id = 0x01041180, 1716 .mask = 0xff0fffff, 1717 .data = &variant_arm_extended_fifo, 1718 }, 1719 { 1720 .id = 0x02041180, 1721 .mask = 0xff0fffff, 1722 .data = &variant_arm_extended_fifo_hwfc, 1723 }, 1724 { 1725 .id = 0x00041181, 1726 .mask = 0x000fffff, 1727 .data = &variant_arm, 1728 }, 1729 /* ST Micro variants */ 1730 { 1731 .id = 0x00180180, 1732 .mask = 0x00ffffff, 1733 .data = &variant_u300, 1734 }, 1735 { 1736 .id = 0x10180180, 1737 .mask = 0xf0ffffff, 1738 .data = &variant_nomadik, 1739 }, 1740 { 1741 .id = 0x00280180, 1742 .mask = 0x00ffffff, 1743 .data = &variant_u300, 1744 }, 1745 { 1746 .id = 0x00480180, 1747 .mask = 0xf0ffffff, 1748 .data = &variant_ux500, 1749 }, 1750 { 1751 .id = 0x10480180, 1752 .mask = 0xf0ffffff, 1753 .data = &variant_ux500v2, 1754 }, 1755 { 0, 0 }, 1756 }; 1757 1758 MODULE_DEVICE_TABLE(amba, mmci_ids); 1759 1760 static struct amba_driver 
mmci_driver = { 1761 .drv = { 1762 .name = DRIVER_NAME, 1763 .pm = &mmci_dev_pm_ops, 1764 }, 1765 .probe = mmci_probe, 1766 .remove = mmci_remove, 1767 .id_table = mmci_ids, 1768 }; 1769 1770 module_amba_driver(mmci_driver); 1771 1772 module_param(fmax, uint, 0444); 1773 1774 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); 1775 MODULE_LICENSE("GPL"); 1776
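
/*
 * Describe the "fmax" module parameter for modinfo; it bounds mmc->f_max
 * whenever the platform data does not supply a maximum frequency.
 */
MODULE_PARM_DESC(fmax, "Maximum SD/MMC bus frequency in Hz when the platform supplies none");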