/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
	bool			pwrreg_clkgate;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

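/*
 * When the host has a single interrupt line, the PIO interrupt sources
 * are handled from IRQ0 as well, so any update of the secondary mask is
 * mirrored into the MCI_IRQ1MASK bits of MMCIMASK0 in addition to being
 * written to MMCIMASK1.
 */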
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is only used in the error and remove paths, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;

	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;

		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			/*
			 * The ST Micro variants have a special bit
			 * to enable SDIO.
			 */
			u32 clk;

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	int bus_width = 0;

	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;
	else
		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);
	if (!pdata->f_max)
		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
	case 0:
		/* No bus-width supplied. */
		break;
	case 4:
		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
		break;
	case 8:
		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
		break;
	default:
		pr_warn("%s: Unsupported bus width\n", np->full_name);
	}
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	return;
}
#endif

static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto clk_disable;
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);

	/* enable pins to be muxed in and configured */
	if (!IS_ERR(host->pins_default)) {
		ret = pinctrl_select_state(host->pinctrl, host->pins_default);
		if (ret)
			dev_warn(&dev->dev, "could not set default pins\n");
	} else
		dev_warn(&dev->dev, "could not get default pinstate\n");

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/* We support these PM capabilities. */
	mmc->pm_caps = MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_wp;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");