1 /* 2 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver 3 * 4 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. 5 * Copyright (C) 2010 ST-Ericsson SA 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/module.h> 12 #include <linux/moduleparam.h> 13 #include <linux/init.h> 14 #include <linux/ioport.h> 15 #include <linux/device.h> 16 #include <linux/io.h> 17 #include <linux/interrupt.h> 18 #include <linux/kernel.h> 19 #include <linux/slab.h> 20 #include <linux/delay.h> 21 #include <linux/err.h> 22 #include <linux/highmem.h> 23 #include <linux/log2.h> 24 #include <linux/mmc/pm.h> 25 #include <linux/mmc/host.h> 26 #include <linux/mmc/card.h> 27 #include <linux/mmc/slot-gpio.h> 28 #include <linux/amba/bus.h> 29 #include <linux/clk.h> 30 #include <linux/scatterlist.h> 31 #include <linux/gpio.h> 32 #include <linux/of_gpio.h> 33 #include <linux/regulator/consumer.h> 34 #include <linux/dmaengine.h> 35 #include <linux/dma-mapping.h> 36 #include <linux/amba/mmci.h> 37 #include <linux/pm_runtime.h> 38 #include <linux/types.h> 39 #include <linux/pinctrl/consumer.h> 40 41 #include <asm/div64.h> 42 #include <asm/io.h> 43 #include <asm/sizes.h> 44 45 #include "mmci.h" 46 47 #define DRIVER_NAME "mmci-pl18x" 48 49 static unsigned int fmax = 515633; 50 51 /** 52 * struct variant_data - MMCI variant-specific quirks 53 * @clkreg: default value for MCICLOCK register 54 * @clkreg_enable: enable value for MMCICLOCK register 55 * @datalength_bits: number of bits in the MMCIDATALENGTH register 56 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY 57 * is asserted (likewise for RX) 58 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY 59 * is asserted (likewise for RX) 60 * @sdio: variant supports SDIO 61 * @st_clkdiv: true if using a ST-specific clock divider algorithm 62 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register 63 * @pwrreg_powerup: power up value for MMCIPOWER register 64 * @signal_direction: input/out direction of bus signals can be indicated 65 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock 66 * @busy_detect: true if busy detection on dat0 is supported 67 * @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply 68 */ 69 struct variant_data { 70 unsigned int clkreg; 71 unsigned int clkreg_enable; 72 unsigned int datalength_bits; 73 unsigned int fifosize; 74 unsigned int fifohalfsize; 75 bool sdio; 76 bool st_clkdiv; 77 bool blksz_datactrl16; 78 u32 pwrreg_powerup; 79 bool signal_direction; 80 bool pwrreg_clkgate; 81 bool busy_detect; 82 bool pwrreg_nopower; 83 }; 84 85 static struct variant_data variant_arm = { 86 .fifosize = 16 * 4, 87 .fifohalfsize = 8 * 4, 88 .datalength_bits = 16, 89 .pwrreg_powerup = MCI_PWR_UP, 90 }; 91 92 static struct variant_data variant_arm_extended_fifo = { 93 .fifosize = 128 * 4, 94 .fifohalfsize = 64 * 4, 95 .datalength_bits = 16, 96 .pwrreg_powerup = MCI_PWR_UP, 97 }; 98 99 static struct variant_data variant_arm_extended_fifo_hwfc = { 100 .fifosize = 128 * 4, 101 .fifohalfsize = 64 * 4, 102 .clkreg_enable = MCI_ARM_HWFCEN, 103 .datalength_bits = 16, 104 .pwrreg_powerup = MCI_PWR_UP, 105 }; 106 107 static struct variant_data variant_u300 = { 108 .fifosize = 16 * 4, 109 .fifohalfsize = 8 * 4, 110 .clkreg_enable = MCI_ST_U300_HWFCEN, 111 .datalength_bits = 16, 112 .sdio = true, 113 .pwrreg_powerup = MCI_PWR_ON, 114 .signal_direction = true, 115 .pwrreg_clkgate = true, 116 .pwrreg_nopower = true, 117 }; 118 119 static struct variant_data variant_nomadik = { 120 .fifosize = 16 * 4, 121 .fifohalfsize = 8 * 4, 122 .clkreg = MCI_CLK_ENABLE, 123 .datalength_bits = 24, 124 .sdio = true, 125 .st_clkdiv = true, 126 .pwrreg_powerup = MCI_PWR_ON, 127 .signal_direction = true, 128 .pwrreg_clkgate = true, 129 .pwrreg_nopower = true, 130 }; 131 132 static struct variant_data variant_ux500 = { 133 .fifosize = 30 * 4, 134 .fifohalfsize = 8 * 4, 135 .clkreg = MCI_CLK_ENABLE, 136 .clkreg_enable = MCI_ST_UX500_HWFCEN, 137 .datalength_bits = 24, 138 .sdio = true, 139 .st_clkdiv = true, 140 .pwrreg_powerup = MCI_PWR_ON, 141 .signal_direction = true, 142 .pwrreg_clkgate = true, 143 .busy_detect = true, 144 .pwrreg_nopower = true, 145 }; 146 147 static struct variant_data variant_ux500v2 = { 148 .fifosize = 30 * 4, 149 .fifohalfsize = 8 * 4, 150 .clkreg = MCI_CLK_ENABLE, 151 .clkreg_enable = MCI_ST_UX500_HWFCEN, 152 .datalength_bits = 24, 153 .sdio = true, 154 .st_clkdiv = true, 155 .blksz_datactrl16 = true, 156 .pwrreg_powerup = MCI_PWR_ON, 157 .signal_direction = true, 158 .pwrreg_clkgate = true, 159 .busy_detect = true, 160 .pwrreg_nopower = true, 161 }; 162 163 static int mmci_card_busy(struct mmc_host *mmc) 164 { 165 struct mmci_host *host = mmc_priv(mmc); 166 unsigned long flags; 167 int busy = 0; 168 169 pm_runtime_get_sync(mmc_dev(mmc)); 170 171 spin_lock_irqsave(&host->lock, flags); 172 if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY) 173 busy = 1; 174 spin_unlock_irqrestore(&host->lock, flags); 175 176 pm_runtime_mark_last_busy(mmc_dev(mmc)); 177 pm_runtime_put_autosuspend(mmc_dev(mmc)); 178 179 return busy; 180 } 181 182 /* 183 * Validate mmc prerequisites 184 */ 185 static int mmci_validate_data(struct mmci_host *host, 186 struct mmc_data *data) 187 { 188 if (!data) 189 return 0; 190 191 if (!is_power_of_2(data->blksz)) { 192 dev_err(mmc_dev(host->mmc), 193 "unsupported block size (%d bytes)\n", data->blksz); 194 return -EINVAL; 195 } 196 197 return 0; 198 } 199 200 static void mmci_reg_delay(struct mmci_host *host) 201 { 202 /* 203 * According to the spec, at least three feedback clock cycles 204 * of max 52 MHz must pass between two writes to the MMCICLOCK reg. 205 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes. 206 * Worst delay time during card init is at 100 kHz => 30 us. 207 * Worst delay time when up and running is at 25 MHz => 120 ns. 208 */ 209 if (host->cclk < 25000000) 210 udelay(30); 211 else 212 ndelay(120); 213 } 214 215 /* 216 * This must be called with host->lock held 217 */ 218 static void mmci_write_clkreg(struct mmci_host *host, u32 clk) 219 { 220 if (host->clk_reg != clk) { 221 host->clk_reg = clk; 222 writel(clk, host->base + MMCICLOCK); 223 } 224 } 225 226 /* 227 * This must be called with host->lock held 228 */ 229 static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) 230 { 231 if (host->pwr_reg != pwr) { 232 host->pwr_reg = pwr; 233 writel(pwr, host->base + MMCIPOWER); 234 } 235 } 236 237 /* 238 * This must be called with host->lock held 239 */ 240 static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) 241 { 242 /* Keep ST Micro busy mode if enabled */ 243 datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE; 244 245 if (host->datactrl_reg != datactrl) { 246 host->datactrl_reg = datactrl; 247 writel(datactrl, host->base + MMCIDATACTRL); 248 } 249 } 250 251 /* 252 * This must be called with host->lock held 253 */ 254 static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) 255 { 256 struct variant_data *variant = host->variant; 257 u32 clk = variant->clkreg; 258 259 /* Make sure cclk reflects the current calculated clock */ 260 host->cclk = 0; 261 262 if (desired) { 263 if (desired >= host->mclk) { 264 clk = MCI_CLK_BYPASS; 265 if (variant->st_clkdiv) 266 clk |= MCI_ST_UX500_NEG_EDGE; 267 host->cclk = host->mclk; 268 } else if (variant->st_clkdiv) { 269 /* 270 * DB8500 TRM says f = mclk / (clkdiv + 2) 271 * => clkdiv = (mclk / f) - 2 272 * Round the divider up so we don't exceed the max 273 * frequency 274 */ 275 clk = DIV_ROUND_UP(host->mclk, desired) - 2; 276 if (clk >= 256) 277 clk = 255; 278 host->cclk = host->mclk / (clk + 2); 279 } else { 280 /* 281 * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) 282 * => clkdiv = mclk / (2 * f) - 1 283 */ 284 clk = host->mclk / (2 * desired) - 1; 285 if (clk >= 256) 286 clk = 255; 287 host->cclk = host->mclk / (2 * (clk + 1)); 288 } 289 290 clk |= variant->clkreg_enable; 291 clk |= MCI_CLK_ENABLE; 292 /* This hasn't proven to be worthwhile */ 293 /* clk |= MCI_CLK_PWRSAVE; */ 294 } 295 296 /* Set actual clock for debug */ 297 host->mmc->actual_clock = host->cclk; 298 299 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) 300 clk |= MCI_4BIT_BUS; 301 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) 302 clk |= MCI_ST_8BIT_BUS; 303 304 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || 305 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) 306 clk |= MCI_ST_UX500_NEG_EDGE; 307 308 mmci_write_clkreg(host, clk); 309 } 310 311 static void 312 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) 313 { 314 writel(0, host->base + MMCICOMMAND); 315 316 BUG_ON(host->data); 317 318 host->mrq = NULL; 319 host->cmd = NULL; 320 321 mmc_request_done(host->mmc, mrq); 322 323 pm_runtime_mark_last_busy(mmc_dev(host->mmc)); 324 pm_runtime_put_autosuspend(mmc_dev(host->mmc)); 325 } 326 327 static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) 328 { 329 void __iomem *base = host->base; 330 331 if (host->singleirq) { 332 unsigned int mask0 = readl(base + MMCIMASK0); 333 334 mask0 &= ~MCI_IRQ1MASK; 335 mask0 |= mask; 336 337 writel(mask0, base + MMCIMASK0); 338 } 339 340 writel(mask, base + MMCIMASK1); 341 } 342 343 static void mmci_stop_data(struct mmci_host *host) 344 { 345 mmci_write_datactrlreg(host, 0); 346 mmci_set_mask1(host, 0); 347 host->data = NULL; 348 } 349 350 static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) 351 { 352 unsigned int flags = SG_MITER_ATOMIC; 353 354 if (data->flags & MMC_DATA_READ) 355 flags |= SG_MITER_TO_SG; 356 else 357 flags |= SG_MITER_FROM_SG; 358 359 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 360 } 361 362 /* 363 * All the DMA operation mode stuff goes inside this ifdef. 364 * This assumes that you have a generic DMA device interface, 365 * no custom DMA interfaces are supported. 366 */ 367 #ifdef CONFIG_DMA_ENGINE 368 static void mmci_dma_setup(struct mmci_host *host) 369 { 370 const char *rxname, *txname; 371 dma_cap_mask_t mask; 372 373 host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); 374 host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); 375 376 /* initialize pre request cookie */ 377 host->next_data.cookie = 1; 378 379 /* Try to acquire a generic DMA engine slave channel */ 380 dma_cap_zero(mask); 381 dma_cap_set(DMA_SLAVE, mask); 382 383 /* 384 * If only an RX channel is specified, the driver will 385 * attempt to use it bidirectionally, however if it is 386 * is specified but cannot be located, DMA will be disabled. 387 */ 388 if (host->dma_rx_channel && !host->dma_tx_channel) 389 host->dma_tx_channel = host->dma_rx_channel; 390 391 if (host->dma_rx_channel) 392 rxname = dma_chan_name(host->dma_rx_channel); 393 else 394 rxname = "none"; 395 396 if (host->dma_tx_channel) 397 txname = dma_chan_name(host->dma_tx_channel); 398 else 399 txname = "none"; 400 401 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", 402 rxname, txname); 403 404 /* 405 * Limit the maximum segment size in any SG entry according to 406 * the parameters of the DMA engine device. 407 */ 408 if (host->dma_tx_channel) { 409 struct device *dev = host->dma_tx_channel->device->dev; 410 unsigned int max_seg_size = dma_get_max_seg_size(dev); 411 412 if (max_seg_size < host->mmc->max_seg_size) 413 host->mmc->max_seg_size = max_seg_size; 414 } 415 if (host->dma_rx_channel) { 416 struct device *dev = host->dma_rx_channel->device->dev; 417 unsigned int max_seg_size = dma_get_max_seg_size(dev); 418 419 if (max_seg_size < host->mmc->max_seg_size) 420 host->mmc->max_seg_size = max_seg_size; 421 } 422 } 423 424 /* 425 * This is used in or so inline it 426 * so it can be discarded. 427 */ 428 static inline void mmci_dma_release(struct mmci_host *host) 429 { 430 if (host->dma_rx_channel) 431 dma_release_channel(host->dma_rx_channel); 432 if (host->dma_tx_channel) 433 dma_release_channel(host->dma_tx_channel); 434 host->dma_rx_channel = host->dma_tx_channel = NULL; 435 } 436 437 static void mmci_dma_data_error(struct mmci_host *host) 438 { 439 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); 440 dmaengine_terminate_all(host->dma_current); 441 host->dma_current = NULL; 442 host->dma_desc_current = NULL; 443 host->data->host_cookie = 0; 444 } 445 446 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 447 { 448 struct dma_chan *chan; 449 enum dma_data_direction dir; 450 451 if (data->flags & MMC_DATA_READ) { 452 dir = DMA_FROM_DEVICE; 453 chan = host->dma_rx_channel; 454 } else { 455 dir = DMA_TO_DEVICE; 456 chan = host->dma_tx_channel; 457 } 458 459 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 460 } 461 462 static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) 463 { 464 u32 status; 465 int i; 466 467 /* Wait up to 1ms for the DMA to complete */ 468 for (i = 0; ; i++) { 469 status = readl(host->base + MMCISTATUS); 470 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) 471 break; 472 udelay(10); 473 } 474 475 /* 476 * Check to see whether we still have some data left in the FIFO - 477 * this catches DMA controllers which are unable to monitor the 478 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- 479 * contiguous buffers. On TX, we'll get a FIFO underrun error. 480 */ 481 if (status & MCI_RXDATAAVLBLMASK) { 482 mmci_dma_data_error(host); 483 if (!data->error) 484 data->error = -EIO; 485 } 486 487 if (!data->host_cookie) 488 mmci_dma_unmap(host, data); 489 490 /* 491 * Use of DMA with scatter-gather is impossible. 492 * Give up with DMA and switch back to PIO mode. 493 */ 494 if (status & MCI_RXDATAAVLBLMASK) { 495 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); 496 mmci_dma_release(host); 497 } 498 499 host->dma_current = NULL; 500 host->dma_desc_current = NULL; 501 } 502 503 /* prepares DMA channel and DMA descriptor, returns non-zero on failure */ 504 static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, 505 struct dma_chan **dma_chan, 506 struct dma_async_tx_descriptor **dma_desc) 507 { 508 struct variant_data *variant = host->variant; 509 struct dma_slave_config conf = { 510 .src_addr = host->phybase + MMCIFIFO, 511 .dst_addr = host->phybase + MMCIFIFO, 512 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 513 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 514 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 515 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 516 .device_fc = false, 517 }; 518 struct dma_chan *chan; 519 struct dma_device *device; 520 struct dma_async_tx_descriptor *desc; 521 enum dma_data_direction buffer_dirn; 522 int nr_sg; 523 524 if (data->flags & MMC_DATA_READ) { 525 conf.direction = DMA_DEV_TO_MEM; 526 buffer_dirn = DMA_FROM_DEVICE; 527 chan = host->dma_rx_channel; 528 } else { 529 conf.direction = DMA_MEM_TO_DEV; 530 buffer_dirn = DMA_TO_DEVICE; 531 chan = host->dma_tx_channel; 532 } 533 534 /* If there's no DMA channel, fall back to PIO */ 535 if (!chan) 536 return -EINVAL; 537 538 /* If less than or equal to the fifo size, don't bother with DMA */ 539 if (data->blksz * data->blocks <= variant->fifosize) 540 return -EINVAL; 541 542 device = chan->device; 543 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 544 if (nr_sg == 0) 545 return -EINVAL; 546 547 dmaengine_slave_config(chan, &conf); 548 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, 549 conf.direction, DMA_CTRL_ACK); 550 if (!desc) 551 goto unmap_exit; 552 553 *dma_chan = chan; 554 *dma_desc = desc; 555 556 return 0; 557 558 unmap_exit: 559 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); 560 return -ENOMEM; 561 } 562 563 static inline int mmci_dma_prep_data(struct mmci_host *host, 564 struct mmc_data *data) 565 { 566 /* Check if next job is already prepared. */ 567 if (host->dma_current && host->dma_desc_current) 568 return 0; 569 570 /* No job were prepared thus do it now. */ 571 return __mmci_dma_prep_data(host, data, &host->dma_current, 572 &host->dma_desc_current); 573 } 574 575 static inline int mmci_dma_prep_next(struct mmci_host *host, 576 struct mmc_data *data) 577 { 578 struct mmci_host_next *nd = &host->next_data; 579 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); 580 } 581 582 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 583 { 584 int ret; 585 struct mmc_data *data = host->data; 586 587 ret = mmci_dma_prep_data(host, host->data); 588 if (ret) 589 return ret; 590 591 /* Okay, go for it. */ 592 dev_vdbg(mmc_dev(host->mmc), 593 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", 594 data->sg_len, data->blksz, data->blocks, data->flags); 595 dmaengine_submit(host->dma_desc_current); 596 dma_async_issue_pending(host->dma_current); 597 598 datactrl |= MCI_DPSM_DMAENABLE; 599 600 /* Trigger the DMA transfer */ 601 mmci_write_datactrlreg(host, datactrl); 602 603 /* 604 * Let the MMCI say when the data is ended and it's time 605 * to fire next DMA request. When that happens, MMCI will 606 * call mmci_data_end() 607 */ 608 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, 609 host->base + MMCIMASK0); 610 return 0; 611 } 612 613 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 614 { 615 struct mmci_host_next *next = &host->next_data; 616 617 WARN_ON(data->host_cookie && data->host_cookie != next->cookie); 618 WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan)); 619 620 host->dma_desc_current = next->dma_desc; 621 host->dma_current = next->dma_chan; 622 next->dma_desc = NULL; 623 next->dma_chan = NULL; 624 } 625 626 static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, 627 bool is_first_req) 628 { 629 struct mmci_host *host = mmc_priv(mmc); 630 struct mmc_data *data = mrq->data; 631 struct mmci_host_next *nd = &host->next_data; 632 633 if (!data) 634 return; 635 636 BUG_ON(data->host_cookie); 637 638 if (mmci_validate_data(host, data)) 639 return; 640 641 if (!mmci_dma_prep_next(host, data)) 642 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; 643 } 644 645 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, 646 int err) 647 { 648 struct mmci_host *host = mmc_priv(mmc); 649 struct mmc_data *data = mrq->data; 650 651 if (!data || !data->host_cookie) 652 return; 653 654 mmci_dma_unmap(host, data); 655 656 if (err) { 657 struct mmci_host_next *next = &host->next_data; 658 struct dma_chan *chan; 659 if (data->flags & MMC_DATA_READ) 660 chan = host->dma_rx_channel; 661 else 662 chan = host->dma_tx_channel; 663 dmaengine_terminate_all(chan); 664 665 next->dma_desc = NULL; 666 next->dma_chan = NULL; 667 } 668 } 669 670 #else 671 /* Blank functions if the DMA engine is not available */ 672 static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) 673 { 674 } 675 static inline void mmci_dma_setup(struct mmci_host *host) 676 { 677 } 678 679 static inline void mmci_dma_release(struct mmci_host *host) 680 { 681 } 682 683 static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) 684 { 685 } 686 687 static inline void mmci_dma_finalize(struct mmci_host *host, 688 struct mmc_data *data) 689 { 690 } 691 692 static inline void mmci_dma_data_error(struct mmci_host *host) 693 { 694 } 695 696 static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) 697 { 698 return -ENOSYS; 699 } 700 701 #define mmci_pre_request NULL 702 #define mmci_post_request NULL 703 704 #endif 705 706 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) 707 { 708 struct variant_data *variant = host->variant; 709 unsigned int datactrl, timeout, irqmask; 710 unsigned long long clks; 711 void __iomem *base; 712 int blksz_bits; 713 714 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", 715 data->blksz, data->blocks, data->flags); 716 717 host->data = data; 718 host->size = data->blksz * data->blocks; 719 data->bytes_xfered = 0; 720 721 clks = (unsigned long long)data->timeout_ns * host->cclk; 722 do_div(clks, 1000000000UL); 723 724 timeout = data->timeout_clks + (unsigned int)clks; 725 726 base = host->base; 727 writel(timeout, base + MMCIDATATIMER); 728 writel(host->size, base + MMCIDATALENGTH); 729 730 blksz_bits = ffs(data->blksz) - 1; 731 BUG_ON(1 << blksz_bits != data->blksz); 732 733 if (variant->blksz_datactrl16) 734 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); 735 else 736 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; 737 738 if (data->flags & MMC_DATA_READ) 739 datactrl |= MCI_DPSM_DIRECTION; 740 741 /* The ST Micro variants has a special bit to enable SDIO */ 742 if (variant->sdio && host->mmc->card) 743 if (mmc_card_sdio(host->mmc->card)) { 744 /* 745 * The ST Micro variants has a special bit 746 * to enable SDIO. 747 */ 748 u32 clk; 749 750 datactrl |= MCI_ST_DPSM_SDIOEN; 751 752 /* 753 * The ST Micro variant for SDIO small write transfers 754 * needs to have clock H/W flow control disabled, 755 * otherwise the transfer will not start. The threshold 756 * depends on the rate of MCLK. 757 */ 758 if (data->flags & MMC_DATA_WRITE && 759 (host->size < 8 || 760 (host->size <= 8 && host->mclk > 50000000))) 761 clk = host->clk_reg & ~variant->clkreg_enable; 762 else 763 clk = host->clk_reg | variant->clkreg_enable; 764 765 mmci_write_clkreg(host, clk); 766 } 767 768 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || 769 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) 770 datactrl |= MCI_ST_DPSM_DDRMODE; 771 772 /* 773 * Attempt to use DMA operation mode, if this 774 * should fail, fall back to PIO mode 775 */ 776 if (!mmci_dma_start_data(host, datactrl)) 777 return; 778 779 /* IRQ mode, map the SG list for CPU reading/writing */ 780 mmci_init_sg(host, data); 781 782 if (data->flags & MMC_DATA_READ) { 783 irqmask = MCI_RXFIFOHALFFULLMASK; 784 785 /* 786 * If we have less than the fifo 'half-full' threshold to 787 * transfer, trigger a PIO interrupt as soon as any data 788 * is available. 789 */ 790 if (host->size < variant->fifohalfsize) 791 irqmask |= MCI_RXDATAAVLBLMASK; 792 } else { 793 /* 794 * We don't actually need to include "FIFO empty" here 795 * since its implicit in "FIFO half empty". 796 */ 797 irqmask = MCI_TXFIFOHALFEMPTYMASK; 798 } 799 800 mmci_write_datactrlreg(host, datactrl); 801 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); 802 mmci_set_mask1(host, irqmask); 803 } 804 805 static void 806 mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) 807 { 808 void __iomem *base = host->base; 809 810 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", 811 cmd->opcode, cmd->arg, cmd->flags); 812 813 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { 814 writel(0, base + MMCICOMMAND); 815 udelay(1); 816 } 817 818 c |= cmd->opcode | MCI_CPSM_ENABLE; 819 if (cmd->flags & MMC_RSP_PRESENT) { 820 if (cmd->flags & MMC_RSP_136) 821 c |= MCI_CPSM_LONGRSP; 822 c |= MCI_CPSM_RESPONSE; 823 } 824 if (/*interrupt*/0) 825 c |= MCI_CPSM_INTERRUPT; 826 827 host->cmd = cmd; 828 829 writel(cmd->arg, base + MMCIARGUMENT); 830 writel(c, base + MMCICOMMAND); 831 } 832 833 static void 834 mmci_data_irq(struct mmci_host *host, struct mmc_data *data, 835 unsigned int status) 836 { 837 /* First check for errors */ 838 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 839 MCI_TXUNDERRUN|MCI_RXOVERRUN)) { 840 u32 remain, success; 841 842 /* Terminate the DMA transfer */ 843 if (dma_inprogress(host)) { 844 mmci_dma_data_error(host); 845 mmci_dma_unmap(host, data); 846 } 847 848 /* 849 * Calculate how far we are into the transfer. Note that 850 * the data counter gives the number of bytes transferred 851 * on the MMC bus, not on the host side. On reads, this 852 * can be as much as a FIFO-worth of data ahead. This 853 * matters for FIFO overruns only. 854 */ 855 remain = readl(host->base + MMCIDATACNT); 856 success = data->blksz * data->blocks - remain; 857 858 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", 859 status, success); 860 if (status & MCI_DATACRCFAIL) { 861 /* Last block was not successful */ 862 success -= 1; 863 data->error = -EILSEQ; 864 } else if (status & MCI_DATATIMEOUT) { 865 data->error = -ETIMEDOUT; 866 } else if (status & MCI_STARTBITERR) { 867 data->error = -ECOMM; 868 } else if (status & MCI_TXUNDERRUN) { 869 data->error = -EIO; 870 } else if (status & MCI_RXOVERRUN) { 871 if (success > host->variant->fifosize) 872 success -= host->variant->fifosize; 873 else 874 success = 0; 875 data->error = -EIO; 876 } 877 data->bytes_xfered = round_down(success, data->blksz); 878 } 879 880 if (status & MCI_DATABLOCKEND) 881 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); 882 883 if (status & MCI_DATAEND || data->error) { 884 if (dma_inprogress(host)) 885 mmci_dma_finalize(host, data); 886 mmci_stop_data(host); 887 888 if (!data->error) 889 /* The error clause is handled above, success! */ 890 data->bytes_xfered = data->blksz * data->blocks; 891 892 if (!data->stop || host->mrq->sbc) { 893 mmci_request_end(host, data->mrq); 894 } else { 895 mmci_start_command(host, data->stop, 0); 896 } 897 } 898 } 899 900 static void 901 mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, 902 unsigned int status) 903 { 904 void __iomem *base = host->base; 905 bool sbc = (cmd == host->mrq->sbc); 906 bool busy_resp = host->variant->busy_detect && 907 (cmd->flags & MMC_RSP_BUSY); 908 909 /* Check if we need to wait for busy completion. */ 910 if (host->busy_status && (status & MCI_ST_CARDBUSY)) 911 return; 912 913 /* Enable busy completion if needed and supported. */ 914 if (!host->busy_status && busy_resp && 915 !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && 916 (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) { 917 writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND, 918 base + MMCIMASK0); 919 host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND); 920 return; 921 } 922 923 /* At busy completion, mask the IRQ and complete the request. */ 924 if (host->busy_status) { 925 writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND, 926 base + MMCIMASK0); 927 host->busy_status = 0; 928 } 929 930 host->cmd = NULL; 931 932 if (status & MCI_CMDTIMEOUT) { 933 cmd->error = -ETIMEDOUT; 934 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 935 cmd->error = -EILSEQ; 936 } else { 937 cmd->resp[0] = readl(base + MMCIRESPONSE0); 938 cmd->resp[1] = readl(base + MMCIRESPONSE1); 939 cmd->resp[2] = readl(base + MMCIRESPONSE2); 940 cmd->resp[3] = readl(base + MMCIRESPONSE3); 941 } 942 943 if ((!sbc && !cmd->data) || cmd->error) { 944 if (host->data) { 945 /* Terminate the DMA transfer */ 946 if (dma_inprogress(host)) { 947 mmci_dma_data_error(host); 948 mmci_dma_unmap(host, host->data); 949 } 950 mmci_stop_data(host); 951 } 952 mmci_request_end(host, host->mrq); 953 } else if (sbc) { 954 mmci_start_command(host, host->mrq->cmd, 0); 955 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 956 mmci_start_data(host, cmd->data); 957 } 958 } 959 960 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 961 { 962 void __iomem *base = host->base; 963 char *ptr = buffer; 964 u32 status; 965 int host_remain = host->size; 966 967 do { 968 int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); 969 970 if (count > remain) 971 count = remain; 972 973 if (count <= 0) 974 break; 975 976 /* 977 * SDIO especially may want to send something that is 978 * not divisible by 4 (as opposed to card sectors 979 * etc). Therefore make sure to always read the last bytes 980 * while only doing full 32-bit reads towards the FIFO. 981 */ 982 if (unlikely(count & 0x3)) { 983 if (count < 4) { 984 unsigned char buf[4]; 985 ioread32_rep(base + MMCIFIFO, buf, 1); 986 memcpy(ptr, buf, count); 987 } else { 988 ioread32_rep(base + MMCIFIFO, ptr, count >> 2); 989 count &= ~0x3; 990 } 991 } else { 992 ioread32_rep(base + MMCIFIFO, ptr, count >> 2); 993 } 994 995 ptr += count; 996 remain -= count; 997 host_remain -= count; 998 999 if (remain == 0) 1000 break; 1001 1002 status = readl(base + MMCISTATUS); 1003 } while (status & MCI_RXDATAAVLBL); 1004 1005 return ptr - buffer; 1006 } 1007 1008 static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) 1009 { 1010 struct variant_data *variant = host->variant; 1011 void __iomem *base = host->base; 1012 char *ptr = buffer; 1013 1014 do { 1015 unsigned int count, maxcnt; 1016 1017 maxcnt = status & MCI_TXFIFOEMPTY ? 1018 variant->fifosize : variant->fifohalfsize; 1019 count = min(remain, maxcnt); 1020 1021 /* 1022 * SDIO especially may want to send something that is 1023 * not divisible by 4 (as opposed to card sectors 1024 * etc), and the FIFO only accept full 32-bit writes. 1025 * So compensate by adding +3 on the count, a single 1026 * byte become a 32bit write, 7 bytes will be two 1027 * 32bit writes etc. 1028 */ 1029 iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2); 1030 1031 ptr += count; 1032 remain -= count; 1033 1034 if (remain == 0) 1035 break; 1036 1037 status = readl(base + MMCISTATUS); 1038 } while (status & MCI_TXFIFOHALFEMPTY); 1039 1040 return ptr - buffer; 1041 } 1042 1043 /* 1044 * PIO data transfer IRQ handler. 1045 */ 1046 static irqreturn_t mmci_pio_irq(int irq, void *dev_id) 1047 { 1048 struct mmci_host *host = dev_id; 1049 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1050 struct variant_data *variant = host->variant; 1051 void __iomem *base = host->base; 1052 unsigned long flags; 1053 u32 status; 1054 1055 status = readl(base + MMCISTATUS); 1056 1057 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); 1058 1059 local_irq_save(flags); 1060 1061 do { 1062 unsigned int remain, len; 1063 char *buffer; 1064 1065 /* 1066 * For write, we only need to test the half-empty flag 1067 * here - if the FIFO is completely empty, then by 1068 * definition it is more than half empty. 1069 * 1070 * For read, check for data available. 1071 */ 1072 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) 1073 break; 1074 1075 if (!sg_miter_next(sg_miter)) 1076 break; 1077 1078 buffer = sg_miter->addr; 1079 remain = sg_miter->length; 1080 1081 len = 0; 1082 if (status & MCI_RXACTIVE) 1083 len = mmci_pio_read(host, buffer, remain); 1084 if (status & MCI_TXACTIVE) 1085 len = mmci_pio_write(host, buffer, remain, status); 1086 1087 sg_miter->consumed = len; 1088 1089 host->size -= len; 1090 remain -= len; 1091 1092 if (remain) 1093 break; 1094 1095 status = readl(base + MMCISTATUS); 1096 } while (1); 1097 1098 sg_miter_stop(sg_miter); 1099 1100 local_irq_restore(flags); 1101 1102 /* 1103 * If we have less than the fifo 'half-full' threshold to transfer, 1104 * trigger a PIO interrupt as soon as any data is available. 1105 */ 1106 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) 1107 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); 1108 1109 /* 1110 * If we run out of data, disable the data IRQs; this 1111 * prevents a race where the FIFO becomes empty before 1112 * the chip itself has disabled the data path, and 1113 * stops us racing with our data end IRQ. 1114 */ 1115 if (host->size == 0) { 1116 mmci_set_mask1(host, 0); 1117 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); 1118 } 1119 1120 return IRQ_HANDLED; 1121 } 1122 1123 /* 1124 * Handle completion of command and data transfers. 1125 */ 1126 static irqreturn_t mmci_irq(int irq, void *dev_id) 1127 { 1128 struct mmci_host *host = dev_id; 1129 u32 status; 1130 int ret = 0; 1131 1132 spin_lock(&host->lock); 1133 1134 do { 1135 struct mmc_command *cmd; 1136 struct mmc_data *data; 1137 1138 status = readl(host->base + MMCISTATUS); 1139 1140 if (host->singleirq) { 1141 if (status & readl(host->base + MMCIMASK1)) 1142 mmci_pio_irq(irq, dev_id); 1143 1144 status &= ~MCI_IRQ1MASK; 1145 } 1146 1147 /* 1148 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's 1149 * enabled) since the HW seems to be triggering the IRQ on both 1150 * edges while monitoring DAT0 for busy completion. 1151 */ 1152 status &= readl(host->base + MMCIMASK0); 1153 writel(status, host->base + MMCICLEAR); 1154 1155 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 1156 1157 cmd = host->cmd; 1158 if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| 1159 MCI_CMDSENT|MCI_CMDRESPEND) && cmd) 1160 mmci_cmd_irq(host, cmd, status); 1161 1162 data = host->data; 1163 if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| 1164 MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| 1165 MCI_DATABLOCKEND) && data) 1166 mmci_data_irq(host, data, status); 1167 1168 /* Don't poll for busy completion in irq context. */ 1169 if (host->busy_status) 1170 status &= ~MCI_ST_CARDBUSY; 1171 1172 ret = 1; 1173 } while (status); 1174 1175 spin_unlock(&host->lock); 1176 1177 return IRQ_RETVAL(ret); 1178 } 1179 1180 static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1181 { 1182 struct mmci_host *host = mmc_priv(mmc); 1183 unsigned long flags; 1184 1185 WARN_ON(host->mrq != NULL); 1186 1187 mrq->cmd->error = mmci_validate_data(host, mrq->data); 1188 if (mrq->cmd->error) { 1189 mmc_request_done(mmc, mrq); 1190 return; 1191 } 1192 1193 pm_runtime_get_sync(mmc_dev(mmc)); 1194 1195 spin_lock_irqsave(&host->lock, flags); 1196 1197 host->mrq = mrq; 1198 1199 if (mrq->data) 1200 mmci_get_next_data(host, mrq->data); 1201 1202 if (mrq->data && mrq->data->flags & MMC_DATA_READ) 1203 mmci_start_data(host, mrq->data); 1204 1205 if (mrq->sbc) 1206 mmci_start_command(host, mrq->sbc, 0); 1207 else 1208 mmci_start_command(host, mrq->cmd, 0); 1209 1210 spin_unlock_irqrestore(&host->lock, flags); 1211 } 1212 1213 static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1214 { 1215 struct mmci_host *host = mmc_priv(mmc); 1216 struct variant_data *variant = host->variant; 1217 u32 pwr = 0; 1218 unsigned long flags; 1219 int ret; 1220 1221 pm_runtime_get_sync(mmc_dev(mmc)); 1222 1223 if (host->plat->ios_handler && 1224 host->plat->ios_handler(mmc_dev(mmc), ios)) 1225 dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); 1226 1227 switch (ios->power_mode) { 1228 case MMC_POWER_OFF: 1229 if (!IS_ERR(mmc->supply.vmmc)) 1230 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1231 1232 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { 1233 regulator_disable(mmc->supply.vqmmc); 1234 host->vqmmc_enabled = false; 1235 } 1236 1237 break; 1238 case MMC_POWER_UP: 1239 if (!IS_ERR(mmc->supply.vmmc)) 1240 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 1241 1242 /* 1243 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP 1244 * and instead uses MCI_PWR_ON so apply whatever value is 1245 * configured in the variant data. 1246 */ 1247 pwr |= variant->pwrreg_powerup; 1248 1249 break; 1250 case MMC_POWER_ON: 1251 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { 1252 ret = regulator_enable(mmc->supply.vqmmc); 1253 if (ret < 0) 1254 dev_err(mmc_dev(mmc), 1255 "failed to enable vqmmc regulator\n"); 1256 else 1257 host->vqmmc_enabled = true; 1258 } 1259 1260 pwr |= MCI_PWR_ON; 1261 break; 1262 } 1263 1264 if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { 1265 /* 1266 * The ST Micro variant has some additional bits 1267 * indicating signal direction for the signals in 1268 * the SD/MMC bus and feedback-clock usage. 1269 */ 1270 pwr |= host->pwr_reg_add; 1271 1272 if (ios->bus_width == MMC_BUS_WIDTH_4) 1273 pwr &= ~MCI_ST_DATA74DIREN; 1274 else if (ios->bus_width == MMC_BUS_WIDTH_1) 1275 pwr &= (~MCI_ST_DATA74DIREN & 1276 ~MCI_ST_DATA31DIREN & 1277 ~MCI_ST_DATA2DIREN); 1278 } 1279 1280 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 1281 if (host->hw_designer != AMBA_VENDOR_ST) 1282 pwr |= MCI_ROD; 1283 else { 1284 /* 1285 * The ST Micro variant use the ROD bit for something 1286 * else and only has OD (Open Drain). 1287 */ 1288 pwr |= MCI_OD; 1289 } 1290 } 1291 1292 /* 1293 * If clock = 0 and the variant requires the MMCIPOWER to be used for 1294 * gating the clock, the MCI_PWR_ON bit is cleared. 1295 */ 1296 if (!ios->clock && variant->pwrreg_clkgate) 1297 pwr &= ~MCI_PWR_ON; 1298 1299 spin_lock_irqsave(&host->lock, flags); 1300 1301 mmci_set_clkreg(host, ios->clock); 1302 mmci_write_pwrreg(host, pwr); 1303 mmci_reg_delay(host); 1304 1305 spin_unlock_irqrestore(&host->lock, flags); 1306 1307 pm_runtime_mark_last_busy(mmc_dev(mmc)); 1308 pm_runtime_put_autosuspend(mmc_dev(mmc)); 1309 } 1310 1311 static int mmci_get_cd(struct mmc_host *mmc) 1312 { 1313 struct mmci_host *host = mmc_priv(mmc); 1314 struct mmci_platform_data *plat = host->plat; 1315 unsigned int status = mmc_gpio_get_cd(mmc); 1316 1317 if (status == -ENOSYS) { 1318 if (!plat->status) 1319 return 1; /* Assume always present */ 1320 1321 status = plat->status(mmc_dev(host->mmc)); 1322 } 1323 return status; 1324 } 1325 1326 static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) 1327 { 1328 int ret = 0; 1329 1330 if (!IS_ERR(mmc->supply.vqmmc)) { 1331 1332 pm_runtime_get_sync(mmc_dev(mmc)); 1333 1334 switch (ios->signal_voltage) { 1335 case MMC_SIGNAL_VOLTAGE_330: 1336 ret = regulator_set_voltage(mmc->supply.vqmmc, 1337 2700000, 3600000); 1338 break; 1339 case MMC_SIGNAL_VOLTAGE_180: 1340 ret = regulator_set_voltage(mmc->supply.vqmmc, 1341 1700000, 1950000); 1342 break; 1343 case MMC_SIGNAL_VOLTAGE_120: 1344 ret = regulator_set_voltage(mmc->supply.vqmmc, 1345 1100000, 1300000); 1346 break; 1347 } 1348 1349 if (ret) 1350 dev_warn(mmc_dev(mmc), "Voltage switch failed\n"); 1351 1352 pm_runtime_mark_last_busy(mmc_dev(mmc)); 1353 pm_runtime_put_autosuspend(mmc_dev(mmc)); 1354 } 1355 1356 return ret; 1357 } 1358 1359 static struct mmc_host_ops mmci_ops = { 1360 .request = mmci_request, 1361 .pre_req = mmci_pre_request, 1362 .post_req = mmci_post_request, 1363 .set_ios = mmci_set_ios, 1364 .get_ro = mmc_gpio_get_ro, 1365 .get_cd = mmci_get_cd, 1366 .start_signal_voltage_switch = mmci_sig_volt_switch, 1367 }; 1368 1369 static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) 1370 { 1371 struct mmci_host *host = mmc_priv(mmc); 1372 int ret = mmc_of_parse(mmc); 1373 1374 if (ret) 1375 return ret; 1376 1377 if (of_get_property(np, "st,sig-dir-dat0", NULL)) 1378 host->pwr_reg_add |= MCI_ST_DATA0DIREN; 1379 if (of_get_property(np, "st,sig-dir-dat2", NULL)) 1380 host->pwr_reg_add |= MCI_ST_DATA2DIREN; 1381 if (of_get_property(np, "st,sig-dir-dat31", NULL)) 1382 host->pwr_reg_add |= MCI_ST_DATA31DIREN; 1383 if (of_get_property(np, "st,sig-dir-dat74", NULL)) 1384 host->pwr_reg_add |= MCI_ST_DATA74DIREN; 1385 if (of_get_property(np, "st,sig-dir-cmd", NULL)) 1386 host->pwr_reg_add |= MCI_ST_CMDDIREN; 1387 if (of_get_property(np, "st,sig-pin-fbclk", NULL)) 1388 host->pwr_reg_add |= MCI_ST_FBCLKEN; 1389 1390 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) 1391 mmc->caps |= MMC_CAP_MMC_HIGHSPEED; 1392 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) 1393 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 1394 1395 return 0; 1396 } 1397 1398 static int mmci_probe(struct amba_device *dev, 1399 const struct amba_id *id) 1400 { 1401 struct mmci_platform_data *plat = dev->dev.platform_data; 1402 struct device_node *np = dev->dev.of_node; 1403 struct variant_data *variant = id->data; 1404 struct mmci_host *host; 1405 struct mmc_host *mmc; 1406 int ret; 1407 1408 /* Must have platform data or Device Tree. */ 1409 if (!plat && !np) { 1410 dev_err(&dev->dev, "No plat data or DT found\n"); 1411 return -EINVAL; 1412 } 1413 1414 if (!plat) { 1415 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); 1416 if (!plat) 1417 return -ENOMEM; 1418 } 1419 1420 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); 1421 if (!mmc) 1422 return -ENOMEM; 1423 1424 ret = mmci_of_parse(np, mmc); 1425 if (ret) 1426 goto host_free; 1427 1428 host = mmc_priv(mmc); 1429 host->mmc = mmc; 1430 1431 host->hw_designer = amba_manf(dev); 1432 host->hw_revision = amba_rev(dev); 1433 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); 1434 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); 1435 1436 host->clk = devm_clk_get(&dev->dev, NULL); 1437 if (IS_ERR(host->clk)) { 1438 ret = PTR_ERR(host->clk); 1439 goto host_free; 1440 } 1441 1442 ret = clk_prepare_enable(host->clk); 1443 if (ret) 1444 goto host_free; 1445 1446 host->plat = plat; 1447 host->variant = variant; 1448 host->mclk = clk_get_rate(host->clk); 1449 /* 1450 * According to the spec, mclk is max 100 MHz, 1451 * so we try to adjust the clock down to this, 1452 * (if possible). 1453 */ 1454 if (host->mclk > 100000000) { 1455 ret = clk_set_rate(host->clk, 100000000); 1456 if (ret < 0) 1457 goto clk_disable; 1458 host->mclk = clk_get_rate(host->clk); 1459 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", 1460 host->mclk); 1461 } 1462 1463 host->phybase = dev->res.start; 1464 host->base = devm_ioremap_resource(&dev->dev, &dev->res); 1465 if (IS_ERR(host->base)) { 1466 ret = PTR_ERR(host->base); 1467 goto clk_disable; 1468 } 1469 1470 /* 1471 * The ARM and ST versions of the block have slightly different 1472 * clock divider equations which means that the minimum divider 1473 * differs too. 1474 */ 1475 if (variant->st_clkdiv) 1476 mmc->f_min = DIV_ROUND_UP(host->mclk, 257); 1477 else 1478 mmc->f_min = DIV_ROUND_UP(host->mclk, 512); 1479 /* 1480 * If no maximum operating frequency is supplied, fall back to use 1481 * the module parameter, which has a (low) default value in case it 1482 * is not specified. Either value must not exceed the clock rate into 1483 * the block, of course. 1484 */ 1485 if (mmc->f_max) 1486 mmc->f_max = min(host->mclk, mmc->f_max); 1487 else 1488 mmc->f_max = min(host->mclk, fmax); 1489 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); 1490 1491 /* Get regulators and the supported OCR mask */ 1492 mmc_regulator_get_supply(mmc); 1493 if (!mmc->ocr_avail) 1494 mmc->ocr_avail = plat->ocr_mask; 1495 else if (plat->ocr_mask) 1496 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); 1497 1498 /* DT takes precedence over platform data. */ 1499 if (!np) { 1500 if (!plat->cd_invert) 1501 mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; 1502 mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; 1503 } 1504 1505 /* We support these capabilities. */ 1506 mmc->caps |= MMC_CAP_CMD23; 1507 1508 if (variant->busy_detect) { 1509 mmci_ops.card_busy = mmci_card_busy; 1510 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE); 1511 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1512 mmc->max_busy_timeout = 0; 1513 } 1514 1515 mmc->ops = &mmci_ops; 1516 1517 /* We support these PM capabilities. */ 1518 mmc->pm_caps |= MMC_PM_KEEP_POWER; 1519 1520 /* 1521 * We can do SGIO 1522 */ 1523 mmc->max_segs = NR_SG; 1524 1525 /* 1526 * Since only a certain number of bits are valid in the data length 1527 * register, we must ensure that we don't exceed 2^num-1 bytes in a 1528 * single request. 1529 */ 1530 mmc->max_req_size = (1 << variant->datalength_bits) - 1; 1531 1532 /* 1533 * Set the maximum segment size. Since we aren't doing DMA 1534 * (yet) we are only limited by the data length register. 1535 */ 1536 mmc->max_seg_size = mmc->max_req_size; 1537 1538 /* 1539 * Block size can be up to 2048 bytes, but must be a power of two. 1540 */ 1541 mmc->max_blk_size = 1 << 11; 1542 1543 /* 1544 * Limit the number of blocks transferred so that we don't overflow 1545 * the maximum request size. 1546 */ 1547 mmc->max_blk_count = mmc->max_req_size >> 11; 1548 1549 spin_lock_init(&host->lock); 1550 1551 writel(0, host->base + MMCIMASK0); 1552 writel(0, host->base + MMCIMASK1); 1553 writel(0xfff, host->base + MMCICLEAR); 1554 1555 /* If DT, cd/wp gpios must be supplied through it. */ 1556 if (!np && gpio_is_valid(plat->gpio_cd)) { 1557 ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0); 1558 if (ret) 1559 goto clk_disable; 1560 } 1561 if (!np && gpio_is_valid(plat->gpio_wp)) { 1562 ret = mmc_gpio_request_ro(mmc, plat->gpio_wp); 1563 if (ret) 1564 goto clk_disable; 1565 } 1566 1567 ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, 1568 DRIVER_NAME " (cmd)", host); 1569 if (ret) 1570 goto clk_disable; 1571 1572 if (!dev->irq[1]) 1573 host->singleirq = true; 1574 else { 1575 ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq, 1576 IRQF_SHARED, DRIVER_NAME " (pio)", host); 1577 if (ret) 1578 goto clk_disable; 1579 } 1580 1581 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1582 1583 amba_set_drvdata(dev, mmc); 1584 1585 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", 1586 mmc_hostname(mmc), amba_part(dev), amba_manf(dev), 1587 amba_rev(dev), (unsigned long long)dev->res.start, 1588 dev->irq[0], dev->irq[1]); 1589 1590 mmci_dma_setup(host); 1591 1592 pm_runtime_set_autosuspend_delay(&dev->dev, 50); 1593 pm_runtime_use_autosuspend(&dev->dev); 1594 pm_runtime_put(&dev->dev); 1595 1596 mmc_add_host(mmc); 1597 1598 return 0; 1599 1600 clk_disable: 1601 clk_disable_unprepare(host->clk); 1602 host_free: 1603 mmc_free_host(mmc); 1604 return ret; 1605 } 1606 1607 static int mmci_remove(struct amba_device *dev) 1608 { 1609 struct mmc_host *mmc = amba_get_drvdata(dev); 1610 1611 if (mmc) { 1612 struct mmci_host *host = mmc_priv(mmc); 1613 1614 /* 1615 * Undo pm_runtime_put() in probe. We use the _sync 1616 * version here so that we can access the primecell. 1617 */ 1618 pm_runtime_get_sync(&dev->dev); 1619 1620 mmc_remove_host(mmc); 1621 1622 writel(0, host->base + MMCIMASK0); 1623 writel(0, host->base + MMCIMASK1); 1624 1625 writel(0, host->base + MMCICOMMAND); 1626 writel(0, host->base + MMCIDATACTRL); 1627 1628 mmci_dma_release(host); 1629 clk_disable_unprepare(host->clk); 1630 mmc_free_host(mmc); 1631 } 1632 1633 return 0; 1634 } 1635 1636 #ifdef CONFIG_PM 1637 static void mmci_save(struct mmci_host *host) 1638 { 1639 unsigned long flags; 1640 1641 spin_lock_irqsave(&host->lock, flags); 1642 1643 writel(0, host->base + MMCIMASK0); 1644 if (host->variant->pwrreg_nopower) { 1645 writel(0, host->base + MMCIDATACTRL); 1646 writel(0, host->base + MMCIPOWER); 1647 writel(0, host->base + MMCICLOCK); 1648 } 1649 mmci_reg_delay(host); 1650 1651 spin_unlock_irqrestore(&host->lock, flags); 1652 } 1653 1654 static void mmci_restore(struct mmci_host *host) 1655 { 1656 unsigned long flags; 1657 1658 spin_lock_irqsave(&host->lock, flags); 1659 1660 if (host->variant->pwrreg_nopower) { 1661 writel(host->clk_reg, host->base + MMCICLOCK); 1662 writel(host->datactrl_reg, host->base + MMCIDATACTRL); 1663 writel(host->pwr_reg, host->base + MMCIPOWER); 1664 } 1665 writel(MCI_IRQENABLE, host->base + MMCIMASK0); 1666 mmci_reg_delay(host); 1667 1668 spin_unlock_irqrestore(&host->lock, flags); 1669 } 1670 1671 static int mmci_runtime_suspend(struct device *dev) 1672 { 1673 struct amba_device *adev = to_amba_device(dev); 1674 struct mmc_host *mmc = amba_get_drvdata(adev); 1675 1676 if (mmc) { 1677 struct mmci_host *host = mmc_priv(mmc); 1678 pinctrl_pm_select_sleep_state(dev); 1679 mmci_save(host); 1680 clk_disable_unprepare(host->clk); 1681 } 1682 1683 return 0; 1684 } 1685 1686 static int mmci_runtime_resume(struct device *dev) 1687 { 1688 struct amba_device *adev = to_amba_device(dev); 1689 struct mmc_host *mmc = amba_get_drvdata(adev); 1690 1691 if (mmc) { 1692 struct mmci_host *host = mmc_priv(mmc); 1693 clk_prepare_enable(host->clk); 1694 mmci_restore(host); 1695 pinctrl_pm_select_default_state(dev); 1696 } 1697 1698 return 0; 1699 } 1700 #endif 1701 1702 static const struct dev_pm_ops mmci_dev_pm_ops = { 1703 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1704 pm_runtime_force_resume) 1705 SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) 1706 }; 1707 1708 static struct amba_id mmci_ids[] = { 1709 { 1710 .id = 0x00041180, 1711 .mask = 0xff0fffff, 1712 .data = &variant_arm, 1713 }, 1714 { 1715 .id = 0x01041180, 1716 .mask = 0xff0fffff, 1717 .data = &variant_arm_extended_fifo, 1718 }, 1719 { 1720 .id = 0x02041180, 1721 .mask = 0xff0fffff, 1722 .data = &variant_arm_extended_fifo_hwfc, 1723 }, 1724 { 1725 .id = 0x00041181, 1726 .mask = 0x000fffff, 1727 .data = &variant_arm, 1728 }, 1729 /* ST Micro variants */ 1730 { 1731 .id = 0x00180180, 1732 .mask = 0x00ffffff, 1733 .data = &variant_u300, 1734 }, 1735 { 1736 .id = 0x10180180, 1737 .mask = 0xf0ffffff, 1738 .data = &variant_nomadik, 1739 }, 1740 { 1741 .id = 0x00280180, 1742 .mask = 0x00ffffff, 1743 .data = &variant_u300, 1744 }, 1745 { 1746 .id = 0x00480180, 1747 .mask = 0xf0ffffff, 1748 .data = &variant_ux500, 1749 }, 1750 { 1751 .id = 0x10480180, 1752 .mask = 0xf0ffffff, 1753 .data = &variant_ux500v2, 1754 }, 1755 { 0, 0 }, 1756 }; 1757 1758 MODULE_DEVICE_TABLE(amba, mmci_ids); 1759 1760 static struct amba_driver mmci_driver = { 1761 .drv = { 1762 .name = DRIVER_NAME, 1763 .pm = &mmci_dev_pm_ops, 1764 }, 1765 .probe = mmci_probe, 1766 .remove = mmci_remove, 1767 .id_table = mmci_ids, 1768 }; 1769 1770 module_amba_driver(mmci_driver); 1771 1772 module_param(fmax, uint, 0444); 1773 1774 MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); 1775 MODULE_LICENSE("GPL"); 1776