/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size occupies bits 16..30 of the
 *		      datactrl register
 * @pwrreg_powerup: power up value for the MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
	bool			pwrreg_clkgate;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
};

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);
}
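/*
 * Worked example of the two divider equations above (illustrative
 * numbers, not taken from any particular board): with mclk = 100 MHz
 * and a desired rate of 400 kHz, an ST variant computes
 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248 and gets
 * cclk = 100000000 / (248 + 2) = 400000 Hz, while an ARM variant
 * computes clkdiv = 100000000 / (2 * 400000) - 1 = 124 and gets
 * cclk = 100000000 / (2 * (124 + 1)) = 400000 Hz.
 */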
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g. if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}
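/*
 * Illustrative sketch (not part of this file) of the board-level
 * platform data that feeds the channel lookup above. The filter
 * function and the two channel configs are placeholders for whatever
 * the DMA controller driver on a given platform provides, here shown
 * in the style of an ste_dma40-based board:
 *
 *	static struct mmci_platform_data example_sdi_data = {
 *		.dma_filter	= stedma40_filter,
 *		.dma_rx_param	= &example_dma_cfg_rx,
 *		.dma_tx_param	= &example_dma_cfg_tx,
 *	};
 */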
/*
 * Inline this so the compiler can discard it when it ends up unused.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}
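/*
 * Illustrative burst arithmetic for the slave config above: the plain
 * ARM variant has fifohalfsize = 8 * 4 = 32 bytes, so maxburst comes
 * out as 32 >> 2 = 8 words, i.e. one DMA burst fills or drains half
 * of the 16-word FIFO, matching the half-full/half-empty request
 * signalling the peripheral uses.
 */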
static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, MMCI will
	 * call mmci_data_irq().
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
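/*
 * Cookie bookkeeping note (illustrative): the counter starts at 1 in
 * mmci_dma_setup(), so successfully prepared requests are stamped
 * 2, 3, 4, ... and 1 is only used as a fallback should the signed
 * counter ever wrap negative. A host_cookie of 0 always means "no
 * descriptor was prepared for this request", which is what
 * mmci_dma_finalize() and mmci_post_request() key off.
 */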
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			/*
			 * The ST Micro variants have a special bit
			 * to enable SDIO.
			 */
			u32 clk;

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode; if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
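/*
 * Illustrative block-size encoding for the datactrl setup above,
 * assuming a standard 512-byte block: blksz_bits = ffs(512) - 1 = 9,
 * so a regular variant places 9 << 4 in the block-size field, while a
 * blksz_datactrl16 variant instead carries the raw 512 at bits 16..30.
 */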
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
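/*
 * Illustrative error accounting for mmci_data_irq() above, assuming
 * 512-byte blocks and an error with remain = 24 bytes outstanding out
 * of a 1024-byte transfer: success = 1024 - 24 = 1000, and
 * round_down(1000, 512) reports 512 bytes transferred - only fully
 * completed blocks are ever counted in bytes_xfered.
 */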
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
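/*
 * Example of the unaligned tail handling in mmci_pio_read() above
 * (illustrative): with 5 bytes left, the first pass takes the
 * count >= 4 branch and reads one 32-bit word (count rounded down to
 * 4); the final byte is then picked up on a later pass through the
 * count < 4 branch, which stages a full word in a bounce buffer and
 * memcpy()s only the single valid byte into the caller's buffer.
 */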
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) &&
		    regulator_is_enabled(mmc->supply.vqmmc))
			regulator_disable(mmc->supply.vqmmc);

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) &&
		    !regulator_is_enabled(mmc->supply.vqmmc))
			regulator_enable(mmc->supply.vqmmc);

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	int bus_width = 0;

	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;
	else
		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);
	if (!pdata->f_max)
		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
	case 0:
		/* No bus-width supplied. */
		break;
	case 4:
		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
		break;
	case 8:
		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
		break;
	default:
		pr_warn("%s: Unsupported bus width\n", np->full_name);
	}
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					   struct mmci_platform_data *pdata)
{
	return;
}
#endif
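/*
 * Hypothetical device tree node exercising the parser above; the node
 * name, address and GPIO phandle are made up for illustration:
 *
 *	sdi0: sdi@80126000 {
 *		compatible = "arm,pl18x", "arm,primecell";
 *		max-frequency = <50000000>;
 *		bus-width = <4>;
 *		mmc-cap-sd-highspeed;
 *		cd-gpios = <&gpio6 26 0>;
 *		cd-inverted;
 *	};
 */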
static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
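	/*
	 * Illustrative arithmetic for the minimum frequencies above,
	 * assuming mclk = 100 MHz: the ST divider bottoms out at
	 * f = mclk / (255 + 2), so f_min = DIV_ROUND_UP(100000000, 257)
	 * = 389106 Hz, while the ARM divider bottoms out at
	 * f = mclk / (2 * (255 + 1)), giving
	 * DIV_ROUND_UP(100000000, 512) = 195313 Hz.
	 */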
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(host->pinctrl)) {
		ret = PTR_ERR(host->pinctrl);
		goto clk_disable;
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);

	/* enable pins to be muxed in and configured */
	if (!IS_ERR(host->pins_default)) {
		ret = pinctrl_select_state(host->pinctrl, host->pins_default);
		if (ret)
			dev_warn(&dev->dev, "could not set default pins\n");
	} else
		dev_warn(&dev->dev, "could not get default pinstate\n");

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/* We support these PM capabilities. */
	mmc->pm_caps = MMC_PM_KEEP_POWER;

	/*
	 * We can do scatter/gather I/O.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case), so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_wp;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

irq0_free:
	free_irq(dev->irq[0], host);
unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
err_gpio_cd:
	iounmap(host->base);
clk_disable:
	clk_disable_unprepare(host->clk);
clk_free:
	clk_put(host->clk);
host_free:
	mmc_free_host(mmc);
rel_regions:
	amba_release_regions(dev);
out:
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x10180180,
		.mask	= 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
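/*
 * Matching note (illustrative): the AMBA bus considers a table entry
 * to match when the masked peripheral ID equals the entry's id, i.e.
 * (periphid & mask) == id. For example, the 0xff0fffff mask on the
 * ARM entries zeroes out the revision nibble (bits 20-23), so every
 * silicon revision of a given part shares a single table entry.
 */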
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");