1 /* 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 3 * 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 5 * Copyright (C) 2010 ST-Ericsson SA 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/module.h> 12 #include <linux/moduleparam.h> 13 #include <linux/init.h> 14 #include <linux/ioport.h> 15 #include <linux/device.h> 16 #include <linux/io.h> 17 #include <linux/interrupt.h> 18 #include <linux/kernel.h> 19 #include <linux/slab.h> 20 #include <linux/delay.h> 21 #include <linux/err.h> 22 #include <linux/highmem.h> 23 #include <linux/log2.h> 24 #include <linux/mmc/pm.h> 25 #include <linux/mmc/host.h> 26 #include <linux/mmc/card.h> 27 #include <linux/mmc/slot-gpio.h> 28 #include <linux/amba/bus.h> 29 #include <linux/clk.h> 30 #include <linux/scatterlist.h> 31 #include <linux/of.h> 32 #include <linux/regulator/consumer.h> 33 #include <linux/dmaengine.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/amba/mmci.h> 36 #include <linux/pm_runtime.h> 37 #include <linux/types.h> 38 #include <linux/pinctrl/consumer.h> 39 40 #include <asm/div64.h> 41 #include <asm/io.h> 42 43 #include "mmci.h" 44 #include "mmci_qcom_dml.h" 45 46 #define DRIVER_NAME "mmci-pl18x" 47 48 #ifdef CONFIG_DMA_ENGINE 49 void mmci_variant_init(struct mmci_host *host); 50 #else 51 static inline void mmci_variant_init(struct mmci_host *host) {} 52 #endif 53 54 static unsigned int fmax = 515633; 55 56 static struct variant_data variant_arm = { 57 .fifosize = 16 * 4, 58 .fifohalfsize = 8 * 4, 59 .datalength_bits = 16, 60 .pwrreg_powerup = MCI_PWR_UP, 61 .f_max = 100000000, 62 .reversed_irq_handling = true, 63 .mmcimask1 = true, 64 .start_err = MCI_STARTBITERR, 65 .opendrain = MCI_ROD, 66 .init = mmci_variant_init, 67 }; 68 69 static struct variant_data variant_arm_extended_fifo = { 70 .fifosize = 128 * 4, 71 .fifohalfsize = 64 * 4, 72 .datalength_bits = 16, 73 .pwrreg_powerup = MCI_PWR_UP, 74 .f_max = 100000000, 75 .mmcimask1 = true, 76 .start_err = MCI_STARTBITERR, 77 .opendrain = MCI_ROD, 78 .init = mmci_variant_init, 79 }; 80 81 static struct variant_data variant_arm_extended_fifo_hwfc = { 82 .fifosize = 128 * 4, 83 .fifohalfsize = 64 * 4, 84 .clkreg_enable = MCI_ARM_HWFCEN, 85 .datalength_bits = 16, 86 .pwrreg_powerup = MCI_PWR_UP, 87 .f_max = 100000000, 88 .mmcimask1 = true, 89 .start_err = MCI_STARTBITERR, 90 .opendrain = MCI_ROD, 91 .init = mmci_variant_init, 92 }; 93 94 static struct variant_data variant_u300 = { 95 .fifosize = 16 * 4, 96 .fifohalfsize = 8 * 4, 97 .clkreg_enable = MCI_ST_U300_HWFCEN, 98 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, 99 .datalength_bits = 16, 100 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 101 .st_sdio = true, 102 .pwrreg_powerup = MCI_PWR_ON, 103 .f_max = 100000000, 104 .signal_direction = true, 105 .pwrreg_clkgate = true, 106 .pwrreg_nopower = true, 107 .mmcimask1 = true, 108 .start_err = MCI_STARTBITERR, 109 .opendrain = MCI_OD, 110 .init = mmci_variant_init, 111 }; 112 113 static struct variant_data variant_nomadik = { 114 .fifosize = 16 * 4, 115 .fifohalfsize = 8 * 4, 116 .clkreg = MCI_CLK_ENABLE, 117 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, 118 .datalength_bits = 24, 119 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 120 .st_sdio = true, 121 .st_clkdiv = true, 122 .pwrreg_powerup = MCI_PWR_ON, 123 .f_max = 100000000, 124 .signal_direction = true, 125 .pwrreg_clkgate = true, 126 
.pwrreg_nopower = true, 127 .mmcimask1 = true, 128 .start_err = MCI_STARTBITERR, 129 .opendrain = MCI_OD, 130 .init = mmci_variant_init, 131 }; 132 133 static struct variant_data variant_ux500 = { 134 .fifosize = 30 * 4, 135 .fifohalfsize = 8 * 4, 136 .clkreg = MCI_CLK_ENABLE, 137 .clkreg_enable = MCI_ST_UX500_HWFCEN, 138 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, 139 .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, 140 .datalength_bits = 24, 141 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 142 .st_sdio = true, 143 .st_clkdiv = true, 144 .pwrreg_powerup = MCI_PWR_ON, 145 .f_max = 100000000, 146 .signal_direction = true, 147 .pwrreg_clkgate = true, 148 .busy_detect = true, 149 .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE, 150 .busy_detect_flag = MCI_ST_CARDBUSY, 151 .busy_detect_mask = MCI_ST_BUSYENDMASK, 152 .pwrreg_nopower = true, 153 .mmcimask1 = true, 154 .start_err = MCI_STARTBITERR, 155 .opendrain = MCI_OD, 156 .init = mmci_variant_init, 157 }; 158 159 static struct variant_data variant_ux500v2 = { 160 .fifosize = 30 * 4, 161 .fifohalfsize = 8 * 4, 162 .clkreg = MCI_CLK_ENABLE, 163 .clkreg_enable = MCI_ST_UX500_HWFCEN, 164 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, 165 .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, 166 .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, 167 .datalength_bits = 24, 168 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 169 .st_sdio = true, 170 .st_clkdiv = true, 171 .blksz_datactrl16 = true, 172 .pwrreg_powerup = MCI_PWR_ON, 173 .f_max = 100000000, 174 .signal_direction = true, 175 .pwrreg_clkgate = true, 176 .busy_detect = true, 177 .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE, 178 .busy_detect_flag = MCI_ST_CARDBUSY, 179 .busy_detect_mask = MCI_ST_BUSYENDMASK, 180 .pwrreg_nopower = true, 181 .mmcimask1 = true, 182 .start_err = MCI_STARTBITERR, 183 .opendrain = MCI_OD, 184 .init = mmci_variant_init, 185 }; 186 187 static struct variant_data variant_stm32 = { 188 .fifosize = 32 * 4, 189 .fifohalfsize = 8 * 4, 190 .clkreg = MCI_CLK_ENABLE, 191 .clkreg_enable = MCI_ST_UX500_HWFCEN, 192 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, 193 .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, 194 .datalength_bits = 24, 195 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, 196 .st_sdio = true, 197 .st_clkdiv = true, 198 .pwrreg_powerup = MCI_PWR_ON, 199 .f_max = 48000000, 200 .pwrreg_clkgate = true, 201 .pwrreg_nopower = true, 202 .init = mmci_variant_init, 203 }; 204 205 static struct variant_data variant_qcom = { 206 .fifosize = 16 * 4, 207 .fifohalfsize = 8 * 4, 208 .clkreg = MCI_CLK_ENABLE, 209 .clkreg_enable = MCI_QCOM_CLK_FLOWENA | 210 MCI_QCOM_CLK_SELECT_IN_FBCLK, 211 .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8, 212 .datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE, 213 .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, 214 .blksz_datactrl4 = true, 215 .datalength_bits = 24, 216 .pwrreg_powerup = MCI_PWR_UP, 217 .f_max = 208000000, 218 .explicit_mclk_control = true, 219 .qcom_fifo = true, 220 .qcom_dml = true, 221 .mmcimask1 = true, 222 .start_err = MCI_STARTBITERR, 223 .opendrain = MCI_ROD, 224 .init = qcom_variant_init, 225 }; 226 227 /* Busy detection for the ST Micro variant */ 228 static int mmci_card_busy(struct mmc_host *mmc) 229 { 230 struct mmci_host *host = mmc_priv(mmc); 231 unsigned long flags; 232 int busy = 0; 233 234 spin_lock_irqsave(&host->lock, flags); 235 if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag) 236 busy = 1; 237 spin_unlock_irqrestore(&host->lock, flags); 238 239 return busy; 240 } 241 242 /* 243 * Validate mmc prerequisites 244 */ 245 static int 
mmci_validate_data(struct mmci_host *host, 246 struct mmc_data *data) 247 { 248 if (!data) 249 return 0; 250 251 if (!is_power_of_2(data->blksz)) { 252 dev_err(mmc_dev(host->mmc), 253 "unsupported block size (%d bytes)\n", data->blksz); 254 return -EINVAL; 255 } 256 257 return 0; 258 } 259 260 static void mmci_reg_delay(struct mmci_host *host) 261 { 262 /* 263 * According to the spec, at least three feedback clock cycles 264 * of max 52 MHz must pass between two writes to the MMCICLOCK reg. 265 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes. 266 * Worst delay time during card init is at 100 kHz => 30 us. 267 * Worst delay time when up and running is at 25 MHz => 120 ns. 268 */ 269 if (host->cclk < 25000000) 270 udelay(30); 271 else 272 ndelay(120); 273 } 274 275 /* 276 * This must be called with host->lock held 277 */ 278 static void mmci_write_clkreg(struct mmci_host *host, u32 clk) 279 { 280 if (host->clk_reg != clk) { 281 host->clk_reg = clk; 282 writel(clk, host->base + MMCICLOCK); 283 } 284 } 285 286 /* 287 * This must be called with host->lock held 288 */ 289 static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) 290 { 291 if (host->pwr_reg != pwr) { 292 host->pwr_reg = pwr; 293 writel(pwr, host->base + MMCIPOWER); 294 } 295 } 296 297 /* 298 * This must be called with host->lock held 299 */ 300 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) 301 { 302 /* Keep busy mode in DPSM if enabled */ 303 datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag; 304 305 if (host->datactrl_reg != datactrl) { 306 host->datactrl_reg = datactrl; 307 writel(datactrl, host->base + MMCIDATACTRL); 308 } 309 } 310 311 /* 312 * This must be called with host->lock held 313 */ 314 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) 315 { 316 struct variant_data *variant = host->variant; 317 u32 clk = variant->clkreg; 318 319 /* Make sure cclk reflects the current calculated clock */ 320 host->cclk = 0; 321 322 if (desired) { 323 if (variant->explicit_mclk_control) { 324 host->cclk = host->mclk; 325 } else if (desired >= host->mclk) { 326 clk = MCI_CLK_BYPASS; 327 if (variant->st_clkdiv) 328 clk |= MCI_ST_UX500_NEG_EDGE; 329 host->cclk = host->mclk; 330 } else if (variant->st_clkdiv) { 331 /* 332 * DB8500 TRM says f = mclk / (clkdiv + 2) 333 * => clkdiv = (mclk / f) - 2 334 * Round the divider up so we don't exceed the max 335 * frequency 336 */ 337 clk = DIV_ROUND_UP(host->mclk, desired) - 2; 338 if (clk >= 256) 339 clk = 255; 340 host->cclk = host->mclk / (clk + 2); 341 } else { 342 /* 343 * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) 344 * => clkdiv = mclk / (2 * f) - 1 345 */ 346 clk = host->mclk / (2 * desired) - 1; 347 if (clk >= 256) 348 clk = 255; 349 host->cclk = host->mclk / (2 * (clk + 1)); 350 } 351 352 clk |= variant->clkreg_enable; 353 clk |= MCI_CLK_ENABLE; 354 /* This hasn't proven to be worthwhile */ 355 /* clk |= MCI_CLK_PWRSAVE; */ 356 } 357 358 /* Set actual clock for debug */ 359 host->mmc->actual_clock = host->cclk; 360 361 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) 362 clk |= MCI_4BIT_BUS; 363 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) 364 clk |= variant->clkreg_8bit_bus_enable; 365 366 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || 367 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) 368 clk |= variant->clkreg_neg_edge_enable; 369 370 mmci_write_clkreg(host, clk); 371 } 372 373 void mmci_dma_release(struct mmci_host *host) 374 { 375 if (host->ops && host->ops->dma_release) 376 
host->ops->dma_release(host); 377 378 host->use_dma = false; 379 } 380 381 void mmci_dma_setup(struct mmci_host *host) 382 { 383 if (!host->ops || !host->ops->dma_setup) 384 return; 385 386 if (host->ops->dma_setup(host)) 387 return; 388 389 /* initialize pre request cookie */ 390 host->next_cookie = 1; 391 392 host->use_dma = true; 393 } 394 395 int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next) 396 { 397 int err; 398 399 if (!host->ops || !host->ops->prep_data) 400 return 0; 401 402 err = host->ops->prep_data(host, data, next); 403 404 if (next && !err) 405 data->host_cookie = ++host->next_cookie < 0 ? 406 1 : host->next_cookie; 407 408 return err; 409 } 410 411 void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data, 412 int err) 413 { 414 if (host->ops && host->ops->unprep_data) 415 host->ops->unprep_data(host, data, err); 416 417 data->host_cookie = 0; 418 } 419 420 static void 421 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) 422 { 423 writel(0, host->base + MMCICOMMAND); 424 425 BUG_ON(host->data); 426 427 host->mrq = NULL; 428 host->cmd = NULL; 429 430 mmc_request_done(host->mmc, mrq); 431 } 432 433 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) 434 { 435 void __iomem *base = host->base; 436 struct variant_data *variant = host->variant; 437 438 if (host->singleirq) { 439 unsigned int mask0 = readl(base + MMCIMASK0); 440 441 mask0 &= ~MCI_IRQ1MASK; 442 mask0 |= mask; 443 444 writel(mask0, base + MMCIMASK0); 445 } 446 447 if (variant->mmcimask1) 448 writel(mask, base + MMCIMASK1); 449 450 host->mask1_reg = mask; 451 } 452 453 static void mmci_stop_data(struct mmci_host *host) 454 { 455 mmci_write_datactrlreg(host, 0); 456 mmci_set_mask1(host, 0); 457 host->data = NULL; 458 } 459 460 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) 461 { 462 unsigned int flags = SG_MITER_ATOMIC; 463 464 if (data->flags & MMC_DATA_READ) 465 flags |= SG_MITER_TO_SG; 466 else 467 flags |= SG_MITER_FROM_SG; 468 469 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 470 } 471 472 /* 473 * All the DMA operation mode stuff goes inside this ifdef. 474 * This assumes that you have a generic DMA device interface, 475 * no custom DMA interfaces are supported. 476 */ 477 #ifdef CONFIG_DMA_ENGINE 478 struct mmci_dmae_next { 479 struct dma_async_tx_descriptor *desc; 480 struct dma_chan *chan; 481 }; 482 483 struct mmci_dmae_priv { 484 struct dma_chan *cur; 485 struct dma_chan *rx_channel; 486 struct dma_chan *tx_channel; 487 struct dma_async_tx_descriptor *desc_current; 488 struct mmci_dmae_next next_data; 489 }; 490 491 int mmci_dmae_setup(struct mmci_host *host) 492 { 493 const char *rxname, *txname; 494 struct mmci_dmae_priv *dmae; 495 496 dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL); 497 if (!dmae) 498 return -ENOMEM; 499 500 host->dma_priv = dmae; 501 502 dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), 503 "rx"); 504 dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), 505 "tx"); 506 507 /* 508 * If only an RX channel is specified, the driver will 509 * attempt to use it bidirectionally, however if it is 510 * specified but cannot be located, DMA will be disabled.
511 */ 512 if (dmae->rx_channel && !dmae->tx_channel) 513 dmae->tx_channel = dmae->rx_channel; 514 515 if (dmae->rx_channel) 516 rxname = dma_chan_name(dmae->rx_channel); 517 else 518 rxname = "none"; 519 520 if (dmae->tx_channel) 521 txname = dma_chan_name(dmae->tx_channel); 522 else 523 txname = "none"; 524 525 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", 526 rxname, txname); 527 528 /* 529 * Limit the maximum segment size in any SG entry according to 530 * the parameters of the DMA engine device. 531 */ 532 if (dmae->tx_channel) { 533 struct device *dev = dmae->tx_channel->device->dev; 534 unsigned int max_seg_size = dma_get_max_seg_size(dev); 535 536 if (max_seg_size < host->mmc->max_seg_size) 537 host->mmc->max_seg_size = max_seg_size; 538 } 539 if (dmae->rx_channel) { 540 struct device *dev = dmae->rx_channel->device->dev; 541 unsigned int max_seg_size = dma_get_max_seg_size(dev); 542 543 if (max_seg_size < host->mmc->max_seg_size) 544 host->mmc->max_seg_size = max_seg_size; 545 } 546 547 if (!dmae->tx_channel || !dmae->rx_channel) { 548 mmci_dmae_release(host); 549 return -EINVAL; 550 } 551 552 return 0; 553 } 554 555 /* 556 * Release the DMA channels. This is called both from the setup 557 * error path above and when DMA support is torn down. 558 */ 559 void mmci_dmae_release(struct mmci_host *host) 560 { 561 struct mmci_dmae_priv *dmae = host->dma_priv; 562 563 if (dmae->rx_channel) 564 dma_release_channel(dmae->rx_channel); 565 if (dmae->tx_channel) 566 dma_release_channel(dmae->tx_channel); 567 dmae->rx_channel = dmae->tx_channel = NULL; 568 } 569 570 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 571 { 572 struct mmci_dmae_priv *dmae = host->dma_priv; 573 struct dma_chan *chan; 574 575 if (data->flags & MMC_DATA_READ) 576 chan = dmae->rx_channel; 577 else 578 chan = dmae->tx_channel; 579 580 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, 581 mmc_get_dma_dir(data)); 582 } 583 584 static void mmci_dma_data_error(struct mmci_host *host) 585 { 586 struct mmci_dmae_priv *dmae = host->dma_priv; 587 588 if (!host->use_dma || !dma_inprogress(host)) 589 return; 590 591 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 592 dmaengine_terminate_all(dmae->cur); 593 host->dma_in_progress = false; 594 dmae->cur = NULL; 595 dmae->desc_current = NULL; 596 host->data->host_cookie = 0; 597 598 mmci_dma_unmap(host, host->data); 599 } 600 601 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) 602 { 603 struct mmci_dmae_priv *dmae = host->dma_priv; 604 u32 status; 605 int i; 606 607 if (!host->use_dma || !dma_inprogress(host)) 608 return; 609 610 /* Wait up to 1ms for the DMA to complete */ 611 for (i = 0; ; i++) { 612 status = readl(host->base + MMCISTATUS); 613 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 614 break; 615 udelay(10); 616 } 617 618 /* 619 * Check to see whether we still have some data left in the FIFO - 620 * this catches DMA controllers which are unable to monitor the 621 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- 622 * contiguous buffers. On TX, we'll get a FIFO underrun error. 623 */ 624 if (status & MCI_RXDATAAVLBLMASK) { 625 mmci_dma_data_error(host); 626 if (!data->error) 627 data->error = -EIO; 628 } else if (!data->host_cookie) { 629 mmci_dma_unmap(host, data); 630 } 631 632 /* 633 * Use of DMA with scatter-gather is impossible. 634 * Give up with DMA and switch back to PIO mode. 635 */ 636 if (status & MCI_RXDATAAVLBLMASK) { 637 dev_err(mmc_dev(host->mmc), "buggy DMA detected.
Taking evasive action.\n"); 638 mmci_dma_release(host); 639 } 640 641 host->dma_in_progress = false; 642 dmae->cur = NULL; 643 dmae->desc_current = NULL; 644 } 645 646 /* prepares DMA channel and DMA descriptor, returns non-zero on failure */ 647 static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, 648 struct dma_chan **dma_chan, 649 struct dma_async_tx_descriptor **dma_desc) 650 { 651 struct mmci_dmae_priv *dmae = host->dma_priv; 652 struct variant_data *variant = host->variant; 653 struct dma_slave_config conf = { 654 .src_addr = host->phybase + MMCIFIFO, 655 .dst_addr = host->phybase + MMCIFIFO, 656 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 657 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 658 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 659 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 660 .device_fc = false, 661 }; 662 struct dma_chan *chan; 663 struct dma_device *device; 664 struct dma_async_tx_descriptor *desc; 665 int nr_sg; 666 unsigned long flags = DMA_CTRL_ACK; 667 668 if (data->flags & MMC_DATA_READ) { 669 conf.direction = DMA_DEV_TO_MEM; 670 chan = dmae->rx_channel; 671 } else { 672 conf.direction = DMA_MEM_TO_DEV; 673 chan = dmae->tx_channel; 674 } 675 676 /* If there's no DMA channel, fall back to PIO */ 677 if (!chan) 678 return -EINVAL; 679 680 /* If less than or equal to the fifo size, don't bother with DMA */ 681 if (data->blksz * data->blocks <= variant->fifosize) 682 return -EINVAL; 683 684 device = chan->device; 685 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, 686 mmc_get_dma_dir(data)); 687 if (nr_sg == 0) 688 return -EINVAL; 689 690 if (host->variant->qcom_dml) 691 flags |= DMA_PREP_INTERRUPT; 692 693 dmaengine_slave_config(chan, &conf); 694 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, 695 conf.direction, flags); 696 if (!desc) 697 goto unmap_exit; 698 699 *dma_chan = chan; 700 *dma_desc = desc; 701 702 return 0; 703 704 unmap_exit: 705 dma_unmap_sg(device->dev, data->sg, data->sg_len, 706 mmc_get_dma_dir(data)); 707 return -ENOMEM; 708 } 709 710 int mmci_dmae_prep_data(struct mmci_host *host, 711 struct mmc_data *data, 712 bool next) 713 { 714 struct mmci_dmae_priv *dmae = host->dma_priv; 715 struct mmci_dmae_next *nd = &dmae->next_data; 716 717 if (!host->use_dma) 718 return -EINVAL; 719 720 if (next) 721 return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc); 722 /* Check if next job is already prepared. */ 723 if (dmae->cur && dmae->desc_current) 724 return 0; 725 726 /* No job were prepared thus do it now. */ 727 return _mmci_dmae_prep_data(host, data, &dmae->cur, 728 &dmae->desc_current); 729 } 730 731 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 732 { 733 int ret; 734 struct mmci_dmae_priv *dmae = host->dma_priv; 735 struct mmc_data *data = host->data; 736 737 if (!host->use_dma) 738 return -EINVAL; 739 740 ret = mmci_dmae_prep_data(host, host->data, false); 741 if (ret) 742 return ret; 743 744 /* Okay, go for it. 
*/ 745 dev_vdbg(mmc_dev(host->mmc), 746 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 747 data->sg_len, data->blksz, data->blocks, data->flags); 748 host->dma_in_progress = true; 749 dmaengine_submit(dmae->desc_current); 750 dma_async_issue_pending(dmae->cur); 751 752 if (host->variant->qcom_dml) 753 dml_start_xfer(host, data); 754 755 datactrl |= MCI_DPSM_DMAENABLE; 756 757 /* Trigger the DMA transfer */ 758 mmci_write_datactrlreg(host, datactrl); 759 760 /* 761 * Let the MMCI say when the data is ended and it's time 762 * to fire next DMA request. When that happens, MMCI will 763 * call mmci_data_irq(). 764 */ 765 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 766 host->base + MMCIMASK0); 767 return 0; 768 } 769 770 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 771 { 772 struct mmci_dmae_priv *dmae = host->dma_priv; 773 struct mmci_dmae_next *next = &dmae->next_data; 774 775 if (!host->use_dma) 776 return; 777 778 WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie); 779 WARN_ON(!data->host_cookie && (next->desc || next->chan)); 780 781 dmae->desc_current = next->desc; 782 dmae->cur = next->chan; 783 next->desc = NULL; 784 next->chan = NULL; 785 } 786 787 void mmci_dmae_unprep_data(struct mmci_host *host, 788 struct mmc_data *data, int err) 789 790 { 791 struct mmci_dmae_priv *dmae = host->dma_priv; 792 793 if (!host->use_dma) 794 return; 795 796 mmci_dma_unmap(host, data); 797 798 if (err) { 799 struct mmci_dmae_next *next = &dmae->next_data; 800 struct dma_chan *chan; 801 if (data->flags & MMC_DATA_READ) 802 chan = dmae->rx_channel; 803 else 804 chan = dmae->tx_channel; 805 dmaengine_terminate_all(chan); 806 807 if (dmae->desc_current == next->desc) 808 dmae->desc_current = NULL; 809 810 if (dmae->cur == next->chan) { 811 host->dma_in_progress = false; 812 dmae->cur = NULL; 813 } 814 815 next->desc = NULL; 816 next->chan = NULL; 817 } 818 } 819 820 static struct mmci_host_ops mmci_variant_ops = { 821 .prep_data = mmci_dmae_prep_data, 822 .unprep_data = mmci_dmae_unprep_data, 823 .dma_setup = mmci_dmae_setup, 824 .dma_release = mmci_dmae_release, 825 }; 826 827 void mmci_variant_init(struct mmci_host *host) 828 { 829 host->ops = &mmci_variant_ops; 830 } 831 #else 832 /* Blank functions if the DMA engine is not available */ 833 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 834 { 835 } 836 837 static inline void mmci_dma_finalize(struct mmci_host *host, 838 struct mmc_data *data) 839 { 840 } 841 842 static inline void mmci_dma_data_error(struct mmci_host *host) 843 { 844 } 845 846 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 847 { 848 return -ENOSYS; 849 } 850 851 #endif 852 853 static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq) 854 { 855 struct mmci_host *host = mmc_priv(mmc); 856 struct mmc_data *data = mrq->data; 857 858 if (!data) 859 return; 860 861 WARN_ON(data->host_cookie); 862 863 if (mmci_validate_data(host, data)) 864 return; 865 866 mmci_prep_data(host, data, true); 867 } 868 869 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, 870 int err) 871 { 872 struct mmci_host *host = mmc_priv(mmc); 873 struct mmc_data *data = mrq->data; 874 875 if (!data || !data->host_cookie) 876 return; 877 878 mmci_unprep_data(host, data, err); 879 } 880 881 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 882 { 883 struct variant_data *variant = host->variant; 884 unsigned
int datactrl, timeout, irqmask; 885 unsigned long long clks; 886 void __iomem *base; 887 int blksz_bits; 888 889 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 890 data->blksz, data->blocks, data->flags); 891 892 host->data = data; 893 host->size = data->blksz * data->blocks; 894 data->bytes_xfered = 0; 895 896 clks = (unsigned long long)data->timeout_ns * host->cclk; 897 do_div(clks, NSEC_PER_SEC); 898 899 timeout = data->timeout_clks + (unsigned int)clks; 900 901 base = host->base; 902 writel(timeout, base + MMCIDATATIMER); 903 writel(host->size, base + MMCIDATALENGTH); 904 905 blksz_bits = ffs(data->blksz) - 1; 906 BUG_ON(1 << blksz_bits != data->blksz); 907 908 if (variant->blksz_datactrl16) 909 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); 910 else if (variant->blksz_datactrl4) 911 datactrl = MCI_DPSM_ENABLE | (data->blksz << 4); 912 else 913 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 914 915 if (data->flags & MMC_DATA_READ) 916 datactrl |= MCI_DPSM_DIRECTION; 917 918 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) { 919 u32 clk; 920 921 datactrl |= variant->datactrl_mask_sdio; 922 923 /* 924 * The ST Micro variant for SDIO small write transfers 925 * needs to have clock H/W flow control disabled, 926 * otherwise the transfer will not start. The threshold 927 * depends on the rate of MCLK. 928 */ 929 if (variant->st_sdio && data->flags & MMC_DATA_WRITE && 930 (host->size < 8 || 931 (host->size <= 8 && host->mclk > 50000000))) 932 clk = host->clk_reg & ~variant->clkreg_enable; 933 else 934 clk = host->clk_reg | variant->clkreg_enable; 935 936 mmci_write_clkreg(host, clk); 937 } 938 939 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || 940 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) 941 datactrl |= variant->datactrl_mask_ddrmode; 942 943 /* 944 * Attempt to use DMA operation mode, if this 945 * should fail, fall back to PIO mode 946 */ 947 if (!mmci_dma_start_data(host, datactrl)) 948 return; 949 950 /* IRQ mode, map the SG list for CPU reading/writing */ 951 mmci_init_sg(host, data); 952 953 if (data->flags & MMC_DATA_READ) { 954 irqmask = MCI_RXFIFOHALFFULLMASK; 955 956 /* 957 * If we have less than the fifo 'half-full' threshold to 958 * transfer, trigger a PIO interrupt as soon as any data 959 * is available. 960 */ 961 if (host->size < variant->fifohalfsize) 962 irqmask |= MCI_RXDATAAVLBLMASK; 963 } else { 964 /* 965 * We don't actually need to include "FIFO empty" here 966 * since its implicit in "FIFO half empty". 
967 */ 968 irqmask = MCI_TXFIFOHALFEMPTYMASK; 969 } 970 971 mmci_write_datactrlreg(host, datactrl); 972 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 973 mmci_set_mask1(host, irqmask); 974 } 975 976 static void 977 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 978 { 979 void __iomem *base = host->base; 980 981 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 982 cmd->opcode, cmd->arg, cmd->flags); 983 984 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 985 writel(0, base + MMCICOMMAND); 986 mmci_reg_delay(host); 987 } 988 989 c |= cmd->opcode | MCI_CPSM_ENABLE; 990 if (cmd->flags & MMC_RSP_PRESENT) { 991 if (cmd->flags & MMC_RSP_136) 992 c |= MCI_CPSM_LONGRSP; 993 c |= MCI_CPSM_RESPONSE; 994 } 995 if (/*interrupt*/0) 996 c |= MCI_CPSM_INTERRUPT; 997 998 if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) 999 c |= host->variant->data_cmd_enable; 1000 1001 host->cmd = cmd; 1002 1003 writel(cmd->arg, base + MMCIARGUMENT); 1004 writel(c, base + MMCICOMMAND); 1005 } 1006 1007 static void 1008 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 1009 unsigned int status) 1010 { 1011 /* Make sure we have data to handle */ 1012 if (!data) 1013 return; 1014 1015 /* First check for errors */ 1016 if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT | 1017 host->variant->start_err | 1018 MCI_TXUNDERRUN | MCI_RXOVERRUN)) { 1019 u32 remain, success; 1020 1021 /* Terminate the DMA transfer */ 1022 mmci_dma_data_error(host); 1023 1024 /* 1025 * Calculate how far we are into the transfer. Note that 1026 * the data counter gives the number of bytes transferred 1027 * on the MMC bus, not on the host side. On reads, this 1028 * can be as much as a FIFO-worth of data ahead. This 1029 * matters for FIFO overruns only. 1030 */ 1031 remain = readl(host->base + MMCIDATACNT); 1032 success = data->blksz * data->blocks - remain; 1033 1034 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 1035 status, success); 1036 if (status & MCI_DATACRCFAIL) { 1037 /* Last block was not successful */ 1038 success -= 1; 1039 data->error = -EILSEQ; 1040 } else if (status & MCI_DATATIMEOUT) { 1041 data->error = -ETIMEDOUT; 1042 } else if (status & MCI_STARTBITERR) { 1043 data->error = -ECOMM; 1044 } else if (status & MCI_TXUNDERRUN) { 1045 data->error = -EIO; 1046 } else if (status & MCI_RXOVERRUN) { 1047 if (success > host->variant->fifosize) 1048 success -= host->variant->fifosize; 1049 else 1050 success = 0; 1051 data->error = -EIO; 1052 } 1053 data->bytes_xfered = round_down(success, data->blksz); 1054 } 1055 1056 if (status & MCI_DATABLOCKEND) 1057 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 1058 1059 if (status & MCI_DATAEND || data->error) { 1060 mmci_dma_finalize(host, data); 1061 1062 mmci_stop_data(host); 1063 1064 if (!data->error) 1065 /* The error clause is handled above, success! */ 1066 data->bytes_xfered = data->blksz * data->blocks; 1067 1068 if (!data->stop || host->mrq->sbc) { 1069 mmci_request_end(host, data->mrq); 1070 } else { 1071 mmci_start_command(host, data->stop, 0); 1072 } 1073 } 1074 } 1075 1076 static void 1077 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 1078 unsigned int status) 1079 { 1080 void __iomem *base = host->base; 1081 bool sbc; 1082 1083 if (!cmd) 1084 return; 1085 1086 sbc = (cmd == host->mrq->sbc); 1087 1088 /* 1089 * We need to be one of these interrupts to be considered worth 1090 * handling. 
Note that we tag on any latent IRQs postponed 1091 * due to waiting for busy status. 1092 */ 1093 if (!((status|host->busy_status) & 1094 (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND))) 1095 return; 1096 1097 /* 1098 * ST Micro variant: handle busy detection. 1099 */ 1100 if (host->variant->busy_detect) { 1101 bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY); 1102 1103 /* We are busy with a command, return */ 1104 if (host->busy_status && 1105 (status & host->variant->busy_detect_flag)) 1106 return; 1107 1108 /* 1109 * We were not busy, but we now got a busy response on 1110 * something that was not an error, and we double-check 1111 * that the special busy status bit is still set before 1112 * proceeding. 1113 */ 1114 if (!host->busy_status && busy_resp && 1115 !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && 1116 (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) { 1117 1118 /* Clear the busy start IRQ */ 1119 writel(host->variant->busy_detect_mask, 1120 host->base + MMCICLEAR); 1121 1122 /* Unmask the busy end IRQ */ 1123 writel(readl(base + MMCIMASK0) | 1124 host->variant->busy_detect_mask, 1125 base + MMCIMASK0); 1126 /* 1127 * Now cache the last response status code (until 1128 * the busy bit goes low), and return. 1129 */ 1130 host->busy_status = 1131 status & (MCI_CMDSENT|MCI_CMDRESPEND); 1132 return; 1133 } 1134 1135 /* 1136 * At this point we are not busy with a command, we have 1137 * not received a new busy request, clear and mask the busy 1138 * end IRQ and fall through to process the IRQ. 1139 */ 1140 if (host->busy_status) { 1141 1142 writel(host->variant->busy_detect_mask, 1143 host->base + MMCICLEAR); 1144 1145 writel(readl(base + MMCIMASK0) & 1146 ~host->variant->busy_detect_mask, 1147 base + MMCIMASK0); 1148 host->busy_status = 0; 1149 } 1150 } 1151 1152 host->cmd = NULL; 1153 1154 if (status & MCI_CMDTIMEOUT) { 1155 cmd->error = -ETIMEDOUT; 1156 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 1157 cmd->error = -EILSEQ; 1158 } else { 1159 cmd->resp[0] = readl(base + MMCIRESPONSE0); 1160 cmd->resp[1] = readl(base + MMCIRESPONSE1); 1161 cmd->resp[2] = readl(base + MMCIRESPONSE2); 1162 cmd->resp[3] = readl(base + MMCIRESPONSE3); 1163 } 1164 1165 if ((!sbc && !cmd->data) || cmd->error) { 1166 if (host->data) { 1167 /* Terminate the DMA transfer */ 1168 mmci_dma_data_error(host); 1169 1170 mmci_stop_data(host); 1171 } 1172 mmci_request_end(host, host->mrq); 1173 } else if (sbc) { 1174 mmci_start_command(host, host->mrq->cmd, 0); 1175 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 1176 mmci_start_data(host, cmd->data); 1177 } 1178 } 1179 1180 static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain) 1181 { 1182 return remain - (readl(host->base + MMCIFIFOCNT) << 2); 1183 } 1184 1185 static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r) 1186 { 1187 /* 1188 * on qcom SDCC4 only 8 words are used in each burst so only 8 addresses 1189 * from the fifo range should be used 1190 */ 1191 if (status & MCI_RXFIFOHALFFULL) 1192 return host->variant->fifohalfsize; 1193 else if (status & MCI_RXDATAAVLBL) 1194 return 4; 1195 1196 return 0; 1197 } 1198 1199 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 1200 { 1201 void __iomem *base = host->base; 1202 char *ptr = buffer; 1203 u32 status = readl(host->base + MMCISTATUS); 1204 int host_remain = host->size; 1205 1206 do { 1207 int count = host->get_rx_fifocnt(host, status, host_remain); 1208 1209 if (count > remain) 1210 
count = remain; 1211 1212 if (count <= 0) 1213 break; 1214 1215 /* 1216 * SDIO especially may want to send something that is 1217 * not divisible by 4 (as opposed to card sectors 1218 * etc). Therefore make sure to always read the last bytes 1219 * while only doing full 32-bit reads towards the FIFO. 1220 */ 1221 if (unlikely(count & 0x3)) { 1222 if (count < 4) { 1223 unsigned char buf[4]; 1224 ioread32_rep(base + MMCIFIFO, buf, 1); 1225 memcpy(ptr, buf, count); 1226 } else { 1227 ioread32_rep(base + MMCIFIFO, ptr, count >> 2); 1228 count &= ~0x3; 1229 } 1230 } else { 1231 ioread32_rep(base + MMCIFIFO, ptr, count >> 2); 1232 } 1233 1234 ptr += count; 1235 remain -= count; 1236 host_remain -= count; 1237 1238 if (remain == 0) 1239 break; 1240 1241 status = readl(base + MMCISTATUS); 1242 } while (status & MCI_RXDATAAVLBL); 1243 1244 return ptr - buffer; 1245 } 1246 1247 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 1248 { 1249 struct variant_data *variant = host->variant; 1250 void __iomem *base = host->base; 1251 char *ptr = buffer; 1252 1253 do { 1254 unsigned int count, maxcnt; 1255 1256 maxcnt = status & MCI_TXFIFOEMPTY ? 1257 variant->fifosize : variant->fifohalfsize; 1258 count = min(remain, maxcnt); 1259 1260 /* 1261 * SDIO especially may want to send something that is 1262 * not divisible by 4 (as opposed to card sectors 1263 * etc), and the FIFO only accept full 32-bit writes. 1264 * So compensate by adding +3 on the count, a single 1265 * byte become a 32bit write, 7 bytes will be two 1266 * 32bit writes etc. 1267 */ 1268 iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2); 1269 1270 ptr += count; 1271 remain -= count; 1272 1273 if (remain == 0) 1274 break; 1275 1276 status = readl(base + MMCISTATUS); 1277 } while (status & MCI_TXFIFOHALFEMPTY); 1278 1279 return ptr - buffer; 1280 } 1281 1282 /* 1283 * PIO data transfer IRQ handler. 1284 */ 1285 static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 1286 { 1287 struct mmci_host *host = dev_id; 1288 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1289 struct variant_data *variant = host->variant; 1290 void __iomem *base = host->base; 1291 u32 status; 1292 1293 status = readl(base + MMCISTATUS); 1294 1295 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 1296 1297 do { 1298 unsigned int remain, len; 1299 char *buffer; 1300 1301 /* 1302 * For write, we only need to test the half-empty flag 1303 * here - if the FIFO is completely empty, then by 1304 * definition it is more than half empty. 1305 * 1306 * For read, check for data available. 1307 */ 1308 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 1309 break; 1310 1311 if (!sg_miter_next(sg_miter)) 1312 break; 1313 1314 buffer = sg_miter->addr; 1315 remain = sg_miter->length; 1316 1317 len = 0; 1318 if (status & MCI_RXACTIVE) 1319 len = mmci_pio_read(host, buffer, remain); 1320 if (status & MCI_TXACTIVE) 1321 len = mmci_pio_write(host, buffer, remain, status); 1322 1323 sg_miter->consumed = len; 1324 1325 host->size -= len; 1326 remain -= len; 1327 1328 if (remain) 1329 break; 1330 1331 status = readl(base + MMCISTATUS); 1332 } while (1); 1333 1334 sg_miter_stop(sg_miter); 1335 1336 /* 1337 * If we have less than the fifo 'half-full' threshold to transfer, 1338 * trigger a PIO interrupt as soon as any data is available. 
1339 */ 1340 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 1341 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 1342 1343 /* 1344 * If we run out of data, disable the data IRQs; this 1345 * prevents a race where the FIFO becomes empty before 1346 * the chip itself has disabled the data path, and 1347 * stops us racing with our data end IRQ. 1348 */ 1349 if (host->size == 0) { 1350 mmci_set_mask1(host, 0); 1351 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 1352 } 1353 1354 return IRQ_HANDLED; 1355 } 1356 1357 /* 1358 * Handle completion of command and data transfers. 1359 */ 1360 static irqreturn_t mmci_irq(int irq, void *dev_id) 1361 { 1362 struct mmci_host *host = dev_id; 1363 u32 status; 1364 int ret = 0; 1365 1366 spin_lock(&host->lock); 1367 1368 do { 1369 status = readl(host->base + MMCISTATUS); 1370 1371 if (host->singleirq) { 1372 if (status & host->mask1_reg) 1373 mmci_pio_irq(irq, dev_id); 1374 1375 status &= ~MCI_IRQ1MASK; 1376 } 1377 1378 /* 1379 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's 1380 * enabled) in mmci_cmd_irq() function where ST Micro busy 1381 * detection variant is handled. Considering the HW seems to be 1382 * triggering the IRQ on both edges while monitoring DAT0 for 1383 * busy completion and that same status bit is used to monitor 1384 * start and end of busy detection, special care must be taken 1385 * to make sure that both start and end interrupts are always 1386 * cleared one after the other. 1387 */ 1388 status &= readl(host->base + MMCIMASK0); 1389 if (host->variant->busy_detect) 1390 writel(status & ~host->variant->busy_detect_mask, 1391 host->base + MMCICLEAR); 1392 else 1393 writel(status, host->base + MMCICLEAR); 1394 1395 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 1396 1397 if (host->variant->reversed_irq_handling) { 1398 mmci_data_irq(host, host->data, status); 1399 mmci_cmd_irq(host, host->cmd, status); 1400 } else { 1401 mmci_cmd_irq(host, host->cmd, status); 1402 mmci_data_irq(host, host->data, status); 1403 } 1404 1405 /* 1406 * Don't poll for busy completion in irq context. 
1407 */ 1408 if (host->variant->busy_detect && host->busy_status) 1409 status &= ~host->variant->busy_detect_flag; 1410 1411 ret = 1; 1412 } while (status); 1413 1414 spin_unlock(&host->lock); 1415 1416 return IRQ_RETVAL(ret); 1417 } 1418 1419 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1420 { 1421 struct mmci_host *host = mmc_priv(mmc); 1422 unsigned long flags; 1423 1424 WARN_ON(host->mrq != NULL); 1425 1426 mrq->cmd->error = mmci_validate_data(host, mrq->data); 1427 if (mrq->cmd->error) { 1428 mmc_request_done(mmc, mrq); 1429 return; 1430 } 1431 1432 spin_lock_irqsave(&host->lock, flags); 1433 1434 host->mrq = mrq; 1435 1436 if (mrq->data) 1437 mmci_get_next_data(host, mrq->data); 1438 1439 if (mrq->data && mrq->data->flags & MMC_DATA_READ) 1440 mmci_start_data(host, mrq->data); 1441 1442 if (mrq->sbc) 1443 mmci_start_command(host, mrq->sbc, 0); 1444 else 1445 mmci_start_command(host, mrq->cmd, 0); 1446 1447 spin_unlock_irqrestore(&host->lock, flags); 1448 } 1449 1450 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1451 { 1452 struct mmci_host *host = mmc_priv(mmc); 1453 struct variant_data *variant = host->variant; 1454 u32 pwr = 0; 1455 unsigned long flags; 1456 int ret; 1457 1458 if (host->plat->ios_handler && 1459 host->plat->ios_handler(mmc_dev(mmc), ios)) 1460 dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); 1461 1462 switch (ios->power_mode) { 1463 case MMC_POWER_OFF: 1464 if (!IS_ERR(mmc->supply.vmmc)) 1465 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1466 1467 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { 1468 regulator_disable(mmc->supply.vqmmc); 1469 host->vqmmc_enabled = false; 1470 } 1471 1472 break; 1473 case MMC_POWER_UP: 1474 if (!IS_ERR(mmc->supply.vmmc)) 1475 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 1476 1477 /* 1478 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP 1479 * and instead uses MCI_PWR_ON so apply whatever value is 1480 * configured in the variant data. 1481 */ 1482 pwr |= variant->pwrreg_powerup; 1483 1484 break; 1485 case MMC_POWER_ON: 1486 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { 1487 ret = regulator_enable(mmc->supply.vqmmc); 1488 if (ret < 0) 1489 dev_err(mmc_dev(mmc), 1490 "failed to enable vqmmc regulator\n"); 1491 else 1492 host->vqmmc_enabled = true; 1493 } 1494 1495 pwr |= MCI_PWR_ON; 1496 break; 1497 } 1498 1499 if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { 1500 /* 1501 * The ST Micro variant has some additional bits 1502 * indicating signal direction for the signals in 1503 * the SD/MMC bus and feedback-clock usage. 1504 */ 1505 pwr |= host->pwr_reg_add; 1506 1507 if (ios->bus_width == MMC_BUS_WIDTH_4) 1508 pwr &= ~MCI_ST_DATA74DIREN; 1509 else if (ios->bus_width == MMC_BUS_WIDTH_1) 1510 pwr &= (~MCI_ST_DATA74DIREN & 1511 ~MCI_ST_DATA31DIREN & 1512 ~MCI_ST_DATA2DIREN); 1513 } 1514 1515 if (variant->opendrain) { 1516 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 1517 pwr |= variant->opendrain; 1518 } else { 1519 /* 1520 * If the variant cannot configure the pads by its own, then we 1521 * expect the pinctrl to be able to do that for us 1522 */ 1523 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 1524 pinctrl_select_state(host->pinctrl, host->pins_opendrain); 1525 else 1526 pinctrl_select_state(host->pinctrl, host->pins_default); 1527 } 1528 1529 /* 1530 * If clock = 0 and the variant requires the MMCIPOWER to be used for 1531 * gating the clock, the MCI_PWR_ON bit is cleared. 
1532 */ 1533 if (!ios->clock && variant->pwrreg_clkgate) 1534 pwr &= ~MCI_PWR_ON; 1535 1536 if (host->variant->explicit_mclk_control && 1537 ios->clock != host->clock_cache) { 1538 ret = clk_set_rate(host->clk, ios->clock); 1539 if (ret < 0) 1540 dev_err(mmc_dev(host->mmc), 1541 "Error setting clock rate (%d)\n", ret); 1542 else 1543 host->mclk = clk_get_rate(host->clk); 1544 } 1545 host->clock_cache = ios->clock; 1546 1547 spin_lock_irqsave(&host->lock, flags); 1548 1549 mmci_set_clkreg(host, ios->clock); 1550 mmci_write_pwrreg(host, pwr); 1551 mmci_reg_delay(host); 1552 1553 spin_unlock_irqrestore(&host->lock, flags); 1554 } 1555 1556 static int mmci_get_cd(struct mmc_host *mmc) 1557 { 1558 struct mmci_host *host = mmc_priv(mmc); 1559 struct mmci_platform_data *plat = host->plat; 1560 unsigned int status = mmc_gpio_get_cd(mmc); 1561 1562 if (status == -ENOSYS) { 1563 if (!plat->status) 1564 return 1; /* Assume always present */ 1565 1566 status = plat->status(mmc_dev(host->mmc)); 1567 } 1568 return status; 1569 } 1570 1571 static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) 1572 { 1573 int ret = 0; 1574 1575 if (!IS_ERR(mmc->supply.vqmmc)) { 1576 1577 switch (ios->signal_voltage) { 1578 case MMC_SIGNAL_VOLTAGE_330: 1579 ret = regulator_set_voltage(mmc->supply.vqmmc, 1580 2700000, 3600000); 1581 break; 1582 case MMC_SIGNAL_VOLTAGE_180: 1583 ret = regulator_set_voltage(mmc->supply.vqmmc, 1584 1700000, 1950000); 1585 break; 1586 case MMC_SIGNAL_VOLTAGE_120: 1587 ret = regulator_set_voltage(mmc->supply.vqmmc, 1588 1100000, 1300000); 1589 break; 1590 } 1591 1592 if (ret) 1593 dev_warn(mmc_dev(mmc), "Voltage switch failed\n"); 1594 } 1595 1596 return ret; 1597 } 1598 1599 static struct mmc_host_ops mmci_ops = { 1600 .request = mmci_request, 1601 .pre_req = mmci_pre_request, 1602 .post_req = mmci_post_request, 1603 .set_ios = mmci_set_ios, 1604 .get_ro = mmc_gpio_get_ro, 1605 .get_cd = mmci_get_cd, 1606 .start_signal_voltage_switch = mmci_sig_volt_switch, 1607 }; 1608 1609 static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) 1610 { 1611 struct mmci_host *host = mmc_priv(mmc); 1612 int ret = mmc_of_parse(mmc); 1613 1614 if (ret) 1615 return ret; 1616 1617 if (of_get_property(np, "st,sig-dir-dat0", NULL)) 1618 host->pwr_reg_add |= MCI_ST_DATA0DIREN; 1619 if (of_get_property(np, "st,sig-dir-dat2", NULL)) 1620 host->pwr_reg_add |= MCI_ST_DATA2DIREN; 1621 if (of_get_property(np, "st,sig-dir-dat31", NULL)) 1622 host->pwr_reg_add |= MCI_ST_DATA31DIREN; 1623 if (of_get_property(np, "st,sig-dir-dat74", NULL)) 1624 host->pwr_reg_add |= MCI_ST_DATA74DIREN; 1625 if (of_get_property(np, "st,sig-dir-cmd", NULL)) 1626 host->pwr_reg_add |= MCI_ST_CMDDIREN; 1627 if (of_get_property(np, "st,sig-pin-fbclk", NULL)) 1628 host->pwr_reg_add |= MCI_ST_FBCLKEN; 1629 1630 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) 1631 mmc->caps |= MMC_CAP_MMC_HIGHSPEED; 1632 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) 1633 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1634 1635 return 0; 1636 } 1637 1638 static int mmci_probe(struct amba_device *dev, 1639 const struct amba_id *id) 1640 { 1641 struct mmci_platform_data *plat = dev->dev.platform_data; 1642 struct device_node *np = dev->dev.of_node; 1643 struct variant_data *variant = id->data; 1644 struct mmci_host *host; 1645 struct mmc_host *mmc; 1646 int ret; 1647 1648 /* Must have platform data or Device Tree. 
*/ 1649 if (!plat && !np) { 1650 dev_err(&dev->dev, "No plat data or DT found\n"); 1651 return -EINVAL; 1652 } 1653 1654 if (!plat) { 1655 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); 1656 if (!plat) 1657 return -ENOMEM; 1658 } 1659 1660 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); 1661 if (!mmc) 1662 return -ENOMEM; 1663 1664 ret = mmci_of_parse(np, mmc); 1665 if (ret) 1666 goto host_free; 1667 1668 host = mmc_priv(mmc); 1669 host->mmc = mmc; 1670 1671 /* 1672 * Some variant (STM32) doesn't have opendrain bit, nevertheless 1673 * pins can be set accordingly using pinctrl 1674 */ 1675 if (!variant->opendrain) { 1676 host->pinctrl = devm_pinctrl_get(&dev->dev); 1677 if (IS_ERR(host->pinctrl)) { 1678 dev_err(&dev->dev, "failed to get pinctrl"); 1679 ret = PTR_ERR(host->pinctrl); 1680 goto host_free; 1681 } 1682 1683 host->pins_default = pinctrl_lookup_state(host->pinctrl, 1684 PINCTRL_STATE_DEFAULT); 1685 if (IS_ERR(host->pins_default)) { 1686 dev_err(mmc_dev(mmc), "Can't select default pins\n"); 1687 ret = PTR_ERR(host->pins_default); 1688 goto host_free; 1689 } 1690 1691 host->pins_opendrain = pinctrl_lookup_state(host->pinctrl, 1692 MMCI_PINCTRL_STATE_OPENDRAIN); 1693 if (IS_ERR(host->pins_opendrain)) { 1694 dev_err(mmc_dev(mmc), "Can't select opendrain pins\n"); 1695 ret = PTR_ERR(host->pins_opendrain); 1696 goto host_free; 1697 } 1698 } 1699 1700 host->hw_designer = amba_manf(dev); 1701 host->hw_revision = amba_rev(dev); 1702 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); 1703 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); 1704 1705 host->clk = devm_clk_get(&dev->dev, NULL); 1706 if (IS_ERR(host->clk)) { 1707 ret = PTR_ERR(host->clk); 1708 goto host_free; 1709 } 1710 1711 ret = clk_prepare_enable(host->clk); 1712 if (ret) 1713 goto host_free; 1714 1715 if (variant->qcom_fifo) 1716 host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt; 1717 else 1718 host->get_rx_fifocnt = mmci_get_rx_fifocnt; 1719 1720 host->plat = plat; 1721 host->variant = variant; 1722 host->mclk = clk_get_rate(host->clk); 1723 /* 1724 * According to the spec, mclk is max 100 MHz, 1725 * so we try to adjust the clock down to this, 1726 * (if possible). 1727 */ 1728 if (host->mclk > variant->f_max) { 1729 ret = clk_set_rate(host->clk, variant->f_max); 1730 if (ret < 0) 1731 goto clk_disable; 1732 host->mclk = clk_get_rate(host->clk); 1733 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1734 host->mclk); 1735 } 1736 1737 host->phybase = dev->res.start; 1738 host->base = devm_ioremap_resource(&dev->dev, &dev->res); 1739 if (IS_ERR(host->base)) { 1740 ret = PTR_ERR(host->base); 1741 goto clk_disable; 1742 } 1743 1744 if (variant->init) 1745 variant->init(host); 1746 1747 /* 1748 * The ARM and ST versions of the block have slightly different 1749 * clock divider equations which means that the minimum divider 1750 * differs too. 1751 * on Qualcomm like controllers get the nearest minimum clock to 100Khz 1752 */ 1753 if (variant->st_clkdiv) 1754 mmc->f_min = DIV_ROUND_UP(host->mclk, 257); 1755 else if (variant->explicit_mclk_control) 1756 mmc->f_min = clk_round_rate(host->clk, 100000); 1757 else 1758 mmc->f_min = DIV_ROUND_UP(host->mclk, 512); 1759 /* 1760 * If no maximum operating frequency is supplied, fall back to use 1761 * the module parameter, which has a (low) default value in case it 1762 * is not specified. Either value must not exceed the clock rate into 1763 * the block, of course. 
1764 */ 1765 if (mmc->f_max) 1766 mmc->f_max = variant->explicit_mclk_control ? 1767 min(variant->f_max, mmc->f_max) : 1768 min(host->mclk, mmc->f_max); 1769 else 1770 mmc->f_max = variant->explicit_mclk_control ? 1771 fmax : min(host->mclk, fmax); 1772 1773 1774 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 1775 1776 /* Get regulators and the supported OCR mask */ 1777 ret = mmc_regulator_get_supply(mmc); 1778 if (ret) 1779 goto clk_disable; 1780 1781 if (!mmc->ocr_avail) 1782 mmc->ocr_avail = plat->ocr_mask; 1783 else if (plat->ocr_mask) 1784 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); 1785 1786 /* We support these capabilities. */ 1787 mmc->caps |= MMC_CAP_CMD23; 1788 1789 /* 1790 * Enable busy detection. 1791 */ 1792 if (variant->busy_detect) { 1793 mmci_ops.card_busy = mmci_card_busy; 1794 /* 1795 * Not all variants have a flag to enable busy detection 1796 * in the DPSM, but if they do, set it here. 1797 */ 1798 if (variant->busy_dpsm_flag) 1799 mmci_write_datactrlreg(host, 1800 host->variant->busy_dpsm_flag); 1801 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1802 mmc->max_busy_timeout = 0; 1803 } 1804 1805 mmc->ops = &mmci_ops; 1806 1807 /* We support these PM capabilities. */ 1808 mmc->pm_caps |= MMC_PM_KEEP_POWER; 1809 1810 /* 1811 * We can do SGIO 1812 */ 1813 mmc->max_segs = NR_SG; 1814 1815 /* 1816 * Since only a certain number of bits are valid in the data length 1817 * register, we must ensure that we don't exceed 2^num-1 bytes in a 1818 * single request. 1819 */ 1820 mmc->max_req_size = (1 << variant->datalength_bits) - 1; 1821 1822 /* 1823 * Set the maximum segment size. Since we aren't doing DMA 1824 * (yet) we are only limited by the data length register. 1825 */ 1826 mmc->max_seg_size = mmc->max_req_size; 1827 1828 /* 1829 * Block size can be up to 2048 bytes, but must be a power of two. 1830 */ 1831 mmc->max_blk_size = 1 << 11; 1832 1833 /* 1834 * Limit the number of blocks transferred so that we don't overflow 1835 * the maximum request size. 
1836 */ 1837 mmc->max_blk_count = mmc->max_req_size >> 11; 1838 1839 spin_lock_init(&host->lock); 1840 1841 writel(0, host->base + MMCIMASK0); 1842 1843 if (variant->mmcimask1) 1844 writel(0, host->base + MMCIMASK1); 1845 1846 writel(0xfff, host->base + MMCICLEAR); 1847 1848 /* 1849 * If: 1850 * - not using DT but using a descriptor table, or 1851 * - using a table of descriptors ALONGSIDE DT, or 1852 * look up these descriptors named "cd" and "wp" right here, fail 1853 * silently of these do not exist 1854 */ 1855 if (!np) { 1856 ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); 1857 if (ret == -EPROBE_DEFER) 1858 goto clk_disable; 1859 1860 ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); 1861 if (ret == -EPROBE_DEFER) 1862 goto clk_disable; 1863 } 1864 1865 ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, 1866 DRIVER_NAME " (cmd)", host); 1867 if (ret) 1868 goto clk_disable; 1869 1870 if (!dev->irq[1]) 1871 host->singleirq = true; 1872 else { 1873 ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq, 1874 IRQF_SHARED, DRIVER_NAME " (pio)", host); 1875 if (ret) 1876 goto clk_disable; 1877 } 1878 1879 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1880 1881 amba_set_drvdata(dev, mmc); 1882 1883 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", 1884 mmc_hostname(mmc), amba_part(dev), amba_manf(dev), 1885 amba_rev(dev), (unsigned long long)dev->res.start, 1886 dev->irq[0], dev->irq[1]); 1887 1888 mmci_dma_setup(host); 1889 1890 pm_runtime_set_autosuspend_delay(&dev->dev, 50); 1891 pm_runtime_use_autosuspend(&dev->dev); 1892 1893 mmc_add_host(mmc); 1894 1895 pm_runtime_put(&dev->dev); 1896 return 0; 1897 1898 clk_disable: 1899 clk_disable_unprepare(host->clk); 1900 host_free: 1901 mmc_free_host(mmc); 1902 return ret; 1903 } 1904 1905 static int mmci_remove(struct amba_device *dev) 1906 { 1907 struct mmc_host *mmc = amba_get_drvdata(dev); 1908 1909 if (mmc) { 1910 struct mmci_host *host = mmc_priv(mmc); 1911 struct variant_data *variant = host->variant; 1912 1913 /* 1914 * Undo pm_runtime_put() in probe. We use the _sync 1915 * version here so that we can access the primecell. 
1916 */ 1917 pm_runtime_get_sync(&dev->dev); 1918 1919 mmc_remove_host(mmc); 1920 1921 writel(0, host->base + MMCIMASK0); 1922 1923 if (variant->mmcimask1) 1924 writel(0, host->base + MMCIMASK1); 1925 1926 writel(0, host->base + MMCICOMMAND); 1927 writel(0, host->base + MMCIDATACTRL); 1928 1929 mmci_dma_release(host); 1930 clk_disable_unprepare(host->clk); 1931 mmc_free_host(mmc); 1932 } 1933 1934 return 0; 1935 } 1936 1937 #ifdef CONFIG_PM 1938 static void mmci_save(struct mmci_host *host) 1939 { 1940 unsigned long flags; 1941 1942 spin_lock_irqsave(&host->lock, flags); 1943 1944 writel(0, host->base + MMCIMASK0); 1945 if (host->variant->pwrreg_nopower) { 1946 writel(0, host->base + MMCIDATACTRL); 1947 writel(0, host->base + MMCIPOWER); 1948 writel(0, host->base + MMCICLOCK); 1949 } 1950 mmci_reg_delay(host); 1951 1952 spin_unlock_irqrestore(&host->lock, flags); 1953 } 1954 1955 static void mmci_restore(struct mmci_host *host) 1956 { 1957 unsigned long flags; 1958 1959 spin_lock_irqsave(&host->lock, flags); 1960 1961 if (host->variant->pwrreg_nopower) { 1962 writel(host->clk_reg, host->base + MMCICLOCK); 1963 writel(host->datactrl_reg, host->base + MMCIDATACTRL); 1964 writel(host->pwr_reg, host->base + MMCIPOWER); 1965 } 1966 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1967 mmci_reg_delay(host); 1968 1969 spin_unlock_irqrestore(&host->lock, flags); 1970 } 1971 1972 static int mmci_runtime_suspend(struct device *dev) 1973 { 1974 struct amba_device *adev = to_amba_device(dev); 1975 struct mmc_host *mmc = amba_get_drvdata(adev); 1976 1977 if (mmc) { 1978 struct mmci_host *host = mmc_priv(mmc); 1979 pinctrl_pm_select_sleep_state(dev); 1980 mmci_save(host); 1981 clk_disable_unprepare(host->clk); 1982 } 1983 1984 return 0; 1985 } 1986 1987 static int mmci_runtime_resume(struct device *dev) 1988 { 1989 struct amba_device *adev = to_amba_device(dev); 1990 struct mmc_host *mmc = amba_get_drvdata(adev); 1991 1992 if (mmc) { 1993 struct mmci_host *host = mmc_priv(mmc); 1994 clk_prepare_enable(host->clk); 1995 mmci_restore(host); 1996 pinctrl_pm_select_default_state(dev); 1997 } 1998 1999 return 0; 2000 } 2001 #endif 2002 2003 static const struct dev_pm_ops mmci_dev_pm_ops = { 2004 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 2005 pm_runtime_force_resume) 2006 SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) 2007 }; 2008 2009 static const struct amba_id mmci_ids[] = { 2010 { 2011 .id = 0x00041180, 2012 .mask = 0xff0fffff, 2013 .data = &variant_arm, 2014 }, 2015 { 2016 .id = 0x01041180, 2017 .mask = 0xff0fffff, 2018 .data = &variant_arm_extended_fifo, 2019 }, 2020 { 2021 .id = 0x02041180, 2022 .mask = 0xff0fffff, 2023 .data = &variant_arm_extended_fifo_hwfc, 2024 }, 2025 { 2026 .id = 0x00041181, 2027 .mask = 0x000fffff, 2028 .data = &variant_arm, 2029 }, 2030 /* ST Micro variants */ 2031 { 2032 .id = 0x00180180, 2033 .mask = 0x00ffffff, 2034 .data = &variant_u300, 2035 }, 2036 { 2037 .id = 0x10180180, 2038 .mask = 0xf0ffffff, 2039 .data = &variant_nomadik, 2040 }, 2041 { 2042 .id = 0x00280180, 2043 .mask = 0x00ffffff, 2044 .data = &variant_nomadik, 2045 }, 2046 { 2047 .id = 0x00480180, 2048 .mask = 0xf0ffffff, 2049 .data = &variant_ux500, 2050 }, 2051 { 2052 .id = 0x10480180, 2053 .mask = 0xf0ffffff, 2054 .data = &variant_ux500v2, 2055 }, 2056 { 2057 .id = 0x00880180, 2058 .mask = 0x00ffffff, 2059 .data = &variant_stm32, 2060 }, 2061 /* Qualcomm variants */ 2062 { 2063 .id = 0x00051180, 2064 .mask = 0x000fffff, 2065 .data = &variant_qcom, 2066 }, 2067 { 0, 0 }, 2068 }; 2069 
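/*
 * Descriptive note on the ID table above: the AMBA bus core compares the
 * peripheral ID register of each probed primecell against these id/mask
 * pairs to select the matching variant_data, and MODULE_DEVICE_TABLE()
 * below exports the table so the module can be autoloaded.
 */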
2070 MODULE_DEVICE_TABLE(amba, mmci_ids); 2071 2072 static struct amba_driver mmci_driver = { 2073 .drv = { 2074 .name = DRIVER_NAME, 2075 .pm = &mmci_dev_pm_ops, 2076 }, 2077 .probe = mmci_probe, 2078 .remove = mmci_remove, 2079 .id_table = mmci_ids, 2080 }; 2081 2082 module_amba_driver(mmci_driver); 2083 2084 module_param(fmax, uint, 0444); 2085 2086 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); 2087 MODULE_LICENSE("GPL"); 2088