/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"
#include "mmci_qcom_dml.h"

#define DRIVER_NAME "mmci-pl18x"

/*
 * When CONFIG_DMA_ENGINE is enabled the real mmci_variant_init() (which
 * installs the dmaengine-based host ops) is defined further down inside
 * the #ifdef CONFIG_DMA_ENGINE section; otherwise it is a no-op.
 */
#ifdef CONFIG_DMA_ENGINE
void mmci_variant_init(struct mmci_host *host);
#else
static inline void mmci_variant_init(struct mmci_host *host) {}
#endif

/*
 * Default maximum card clock frequency in Hz.
 * NOTE(review): the consumer of this value is outside this chunk —
 * presumably a module parameter capping the bus clock; confirm at probe.
 */
static unsigned int fmax = 515633;

/* Original ARM PrimeCell PL180: 16-word FIFO, 16-bit data length counter. */
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.reversed_irq_handling	= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

/* ARM variant with an extended (128-word) FIFO. */
static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

/* Extended-FIFO ARM variant with hardware flow control enabled. */
static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

/* ST-Ericsson U300 variant (ST Micro derivative with SDIO support). */
static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
.st_sdio		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

/* ST-Ericsson Nomadik variant: 24-bit data length, ST clock divider. */
static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

/* ST-Ericsson Ux500 variant: adds hardware busy detection. */
static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

/* Ux500 v2: adds DDR mode and a 16-bit block size field in DATACTRL. */
static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

/* STM32 variant of the ST Micro core, capped at 48 MHz. */
static struct variant_data variant_stm32 = {
	.fifosize		= 32 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.datalength_bits
= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 48000000,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.init			= mmci_variant_init,
};

/* Qualcomm SDCC variant: explicit mclk control, DML-assisted DMA. */
static struct variant_data variant_qcom = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
	.blksz_datactrl4	= true,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_dpsm_enable	= MCI_DPSM_ENABLE,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 208000000,
	.explicit_mclk_control	= true,
	.qcom_fifo		= true,
	.qcom_dml		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= qcom_variant_init,
};

/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	/* Sample the variant-specific busy bit in MMCISTATUS under the lock */
	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}

/* Delay long enough that back-to-back CLOCK/POWER register writes settle. */
static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	/* Skip the write if the cached value is already current */
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	/* Skip the write if the cached value is already current */
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			/* Clamp to the 8-bit clock divider field */
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		}
else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			/* Clamp to the 8-bit clock divider field */
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}

/* Release variant DMA resources (if any) and disable DMA for this host. */
void mmci_dma_release(struct mmci_host *host)
{
	if (host->ops && host->ops->dma_release)
		host->ops->dma_release(host);

	host->use_dma = false;
}

/* Set up variant DMA; on success enable DMA and seed the request cookie. */
void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;

	/* initialize pre request cookie */
	host->next_cookie = 1;

	host->use_dma = true;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	/* Give the variant a chance to apply additional constraints */
	if (host->ops && host->ops->validate_data)
		return host->ops->validate_data(host, data);

	return 0;
}

/* Prepare a data transfer via the variant hook; tag it with a cookie. */
int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);

	/* The cookie must stay positive: wrap back to 1 on signed overflow */
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}

/* Undo mmci_prep_data() and clear the request's cookie. */
void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
		      int err)
{
	if (host->ops && host->ops->unprep_data)
		host->ops->unprep_data(host, data, err);

	data->host_cookie = 0;
}

/* Promote a pre-prepared (next) descriptor to be the current one. */
void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

	if (host->ops && host->ops->get_next_data)
		host->ops->get_next_data(host, data);
}

/* Kick off a DMA transfer; returns non-zero to request PIO fallback. */
int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	struct mmc_data *data = host->data;
	int ret;

	if (!host->use_dma)
		return -EINVAL;

	ret = mmci_prep_data(host, data, false);
	if (ret)
		return ret;

	if (!host->ops || !host->ops->dma_start)
		return -EINVAL;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);

	host->ops->dma_start(host, &datactrl);

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request.
When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

/* Finalize a completed DMA transfer via the variant hook. */
void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_finalize)
		host->ops->dma_finalize(host, data);
}

/* Tear down a failed DMA transfer via the variant hook. */
void mmci_dma_error(struct mmci_host *host)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_error)
		host->ops->dma_error(host);
}

/* Complete the current request and hand it back to the MMC core. */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}

/* Program the PIO interrupt mask (MASK1, or MASK0 on single-IRQ hosts). */
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

	/* With a single IRQ line the PIO bits are routed through MASK0 */
	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}

/* Disable the DPSM and PIO interrupts, and forget the current data. */
static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

/* Start an sg_miter over the request's scatterlist for PIO transfers. */
static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
/* Descriptor/channel pair pre-prepared for the *next* request. */
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
};

/* Per-host dmaengine state: current and next transfer bookkeeping. */
struct mmci_dmae_priv {
	struct dma_chan	*cur;
	struct dma_chan	*rx_channel;
	struct dma_chan	*tx_channel;
	struct dma_async_tx_descriptor	*desc_current;
	struct mmci_dmae_next next_data;
};

/* Acquire the "rx"/"tx" slave channels and size mmc segments to match. */
int mmci_dmae_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;

	dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "rx");
	dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "tx");

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (dmae->rx_channel && !dmae->tx_channel)
		dmae->tx_channel = dmae->rx_channel;

	if (dmae->rx_channel)
		rxname = dma_chan_name(dmae->rx_channel);
	else
		rxname = "none";

	if (dmae->tx_channel)
		txname = dma_chan_name(dmae->tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
*/
	if (dmae->tx_channel) {
		struct device *dev = dmae->tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (dmae->rx_channel) {
		struct device *dev = dmae->rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	/* Both directions are required; give the channels back otherwise */
	if (!dmae->tx_channel || !dmae->rx_channel) {
		mmci_dmae_release(host);
		return -EINVAL;
	}

	return 0;
}

/*
 * Release both DMA channels. Also called from the error path of
 * mmci_dmae_setup() above, so it must tolerate rx == tx.
 */
void mmci_dmae_release(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (dmae->rx_channel)
		dma_release_channel(dmae->rx_channel);
	if (dmae->tx_channel)
		dma_release_channel(dmae->tx_channel);
	dmae->rx_channel = dmae->tx_channel = NULL;
}

/* Unmap the request's scatterlist from the direction-matching channel. */
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = dmae->rx_channel;
	else
		chan = dmae->tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

/* Abort the in-flight DMA transfer and reset the dmaengine state. */
void mmci_dmae_error(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(dmae->cur);
	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
	host->data->host_cookie = 0;

	mmci_dma_unmap(host, host->data);
}

/* Drain the FIFO after DMA completion and detect buggy DMA controllers. */
void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	u32 status;
	int i;

	if (!dma_inprogress(host))
		return;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_error(host);
		if (!data->error)
			data->error = -EIO;
	} else if (!data->host_cookie) {
		mmci_dma_unmap(host, data);
	}

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = dmae->rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = dmae->tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

/* Prepare either the next (pre-request) or the current DMA job. */
int mmci_dmae_prep_data(struct mmci_host *host,
			struct mmc_data *data,
			bool next)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *nd = &dmae->next_data;

	if (!host->use_dma)
		return -EINVAL;

	if (next)
		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
	/* Check if next job is already prepared. */
	if (dmae->cur && dmae->desc_current)
		return 0;

	/*
	 * No job were prepared thus do it now.
*/
	return _mmci_dmae_prep_data(host, data, &dmae->cur,
				    &dmae->desc_current);
}

/* Submit the prepared descriptor and turn on DMA in the DPSM. */
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmc_data *data = host->data;

	host->dma_in_progress = true;
	dmaengine_submit(dmae->desc_current);
	dma_async_issue_pending(dmae->cur);

	/* The Qualcomm DML block needs to be programmed per transfer */
	if (host->variant->qcom_dml)
		dml_start_xfer(host, data);

	*datactrl |= MCI_DPSM_DMAENABLE;

	return 0;
}

/* Move the pre-prepared next descriptor/channel into the current slot. */
void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *next = &dmae->next_data;

	if (!host->use_dma)
		return;

	WARN_ON(!data->host_cookie && (next->desc || next->chan));

	dmae->desc_current = next->desc;
	dmae->cur = next->chan;
	next->desc = NULL;
	next->chan = NULL;
}

/* Unmap a prepared transfer; on error also terminate and reset state. */
void mmci_dmae_unprep_data(struct mmci_host *host,
			   struct mmc_data *data, int err)

{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!host->use_dma)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_dmae_next *next = &dmae->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = dmae->rx_channel;
		else
			chan = dmae->tx_channel;
		dmaengine_terminate_all(chan);

		if (dmae->desc_current == next->desc)
			dmae->desc_current = NULL;

		if (dmae->cur == next->chan) {
			host->dma_in_progress = false;
			dmae->cur = NULL;
		}

		next->desc = NULL;
		next->chan = NULL;
	}
}

/* dmaengine-backed implementation of the mmci host ops. */
static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};

void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}
#endif

/* mmc_host_ops .pre_req: prepare the next request's DMA mapping early. */
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	WARN_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	mmci_prep_data(host, data, true);
}

/* mmc_host_ops .post_req: undo what mmci_pre_request() prepared. */
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_unprep_data(host, data, err);
}

/* Program the data path (timer, length, DATACTRL) and start the transfer. */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	/* Convert the ns timeout into card clock cycles */
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	/* The block size is encoded differently depending on the variant */
	if (variant->blksz_datactrl16)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
	else
		datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
*/
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

/* Write the argument and CPSM control word to start a command. */
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	/* The CPSM must be disabled (with a settle delay) before reprogramming */
	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}
	/* CPSM interrupt-pending mode is deliberately never used */
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

/* Handle data-path interrupts: errors, DATABLOCKEND, DATAEND. */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		mmci_dma_error(host);

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			/* Discount the FIFO-ahead read data (see note above) */
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		mmci_dma_finalize(host, data);

		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

/* Handle command-path interrupts, including ST Micro busy detection. */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);

	/*
	 * We need to be one of these interrupts to be considered worth
	 * handling. Note that we tag on any latent IRQs postponed
	 * due to waiting for busy status.
	 */
	if (!((status|host->busy_status) &
	      (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/*
	 * ST Micro variant: handle busy detection.
	 */
	if (host->variant->busy_detect) {
		bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

		/* We are busy with a command, return */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag))
			return;

		/*
		 * We were not busy, but we now got a busy response on
		 * something that was not an error, and we double-check
		 * that the special busy status bit is still set before
		 * proceeding.
		 */
		if (!host->busy_status && busy_resp &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			/* Clear the busy start IRQ */
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			/* Unmask the busy end IRQ */
			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			/*
			 * Now cache the last response status code (until
			 * the busy bit goes low), and return.
			 */
			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}

		/*
		 * At this point we are not busy with a command, we have
		 * not received a new busy request, clear and mask the busy
		 * end IRQ and fall through to process the IRQ.
1234 */ 1235 if (host->busy_status) { 1236 1237 writel(host->variant->busy_detect_mask, 1238 host->base + MMCICLEAR); 1239 1240 writel(readl(base + MMCIMASK0) & 1241 ~host->variant->busy_detect_mask, 1242 base + MMCIMASK0); 1243 host->busy_status = 0; 1244 } 1245 } 1246 1247 host->cmd = NULL; 1248 1249 if (status & MCI_CMDTIMEOUT) { 1250 cmd->error = -ETIMEDOUT; 1251 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { 1252 cmd->error = -EILSEQ; 1253 } else { 1254 cmd->resp[0] = readl(base + MMCIRESPONSE0); 1255 cmd->resp[1] = readl(base + MMCIRESPONSE1); 1256 cmd->resp[2] = readl(base + MMCIRESPONSE2); 1257 cmd->resp[3] = readl(base + MMCIRESPONSE3); 1258 } 1259 1260 if ((!sbc && !cmd->data) || cmd->error) { 1261 if (host->data) { 1262 /* Terminate the DMA transfer */ 1263 mmci_dma_error(host); 1264 1265 mmci_stop_data(host); 1266 } 1267 mmci_request_end(host, host->mrq); 1268 } else if (sbc) { 1269 mmci_start_command(host, host->mrq->cmd, 0); 1270 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 1271 mmci_start_data(host, cmd->data); 1272 } 1273 } 1274 1275 static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain) 1276 { 1277 return remain - (readl(host->base + MMCIFIFOCNT) << 2); 1278 } 1279 1280 static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r) 1281 { 1282 /* 1283 * on qcom SDCC4 only 8 words are used in each burst so only 8 addresses 1284 * from the fifo range should be used 1285 */ 1286 if (status & MCI_RXFIFOHALFFULL) 1287 return host->variant->fifohalfsize; 1288 else if (status & MCI_RXDATAAVLBL) 1289 return 4; 1290 1291 return 0; 1292 } 1293 1294 static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) 1295 { 1296 void __iomem *base = host->base; 1297 char *ptr = buffer; 1298 u32 status = readl(host->base + MMCISTATUS); 1299 int host_remain = host->size; 1300 1301 do { 1302 int count = host->get_rx_fifocnt(host, status, host_remain); 1303 1304 if (count > remain) 
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				/* Bounce the partial word so we never
				 * overrun the caller's buffer */
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

/*
 * Fill the transmit FIFO from @buffer by PIO; returns the number of bytes
 * consumed from the buffer.
 */
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		/* A fully empty FIFO can take a whole FIFO's worth,
		 * otherwise only the half-FIFO burst size */
		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accept full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte become a 32bit write, 7 bytes will be two
		 * 32bit writes etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		/* Segment not fully consumed: the FIFO is drained/full,
		 * wait for the next PIO interrupt */
		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		/* With a single IRQ line, dispatch PIO events here and
		 * strip them from the status we process below */
		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
		 * enabled) in mmci_cmd_irq() function where ST Micro busy
		 * detection variant is handled. Considering the HW seems to be
		 * triggering the IRQ on both edges while monitoring DAT0 for
		 * busy completion and that same status bit is used to monitor
		 * start and end of busy detection, special care must be taken
		 * to make sure that both start and end interrupts are always
		 * cleared one after the other.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Don't poll for busy completion in irq context.
		 */
		if (host->variant->busy_detect && host->busy_status)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

/*
 * mmc_host_ops ->request: validate and launch a request. Reads start the
 * data phase immediately; writes start it from mmci_cmd_irq() once the
 * command has completed.
 */
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	/* Only one request may be in flight at a time */
	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	/* CMD23 (set block count) goes out before the data command */
	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * mmc_host_ops ->set_ios: apply power mode, regulators, signal direction,
 * open-drain/pinctrl configuration and clocking for the new I/O settings.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->pwr_reg_add;

		/* Clear direction-enable bits for data lines that are
		 * unused at the current bus width */
		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (variant->opendrain) {
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pwr |= variant->opendrain;
	} else {
		/*
		 * If the variant cannot configure the pads by its own, then we
		 * expect the pinctrl to be able to do that for us
		 */
		if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
			pinctrl_select_state(host->pinctrl, host->pins_opendrain);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	if (host->variant->explicit_mclk_control &&
	    ios->clock != host->clock_cache) {
		ret = clk_set_rate(host->clk, ios->clock);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Error setting clock rate (%d)\n", ret);
		else
			host->mclk = clk_get_rate(host->clk);
	}
	host->clock_cache = ios->clock;

	spin_lock_irqsave(&host->lock, flags);

	if (host->ops && host->ops->set_clkreg)
		host->ops->set_clkreg(host, ios->clock);
	else
		mmci_set_clkreg(host, ios->clock);

	if (host->ops && host->ops->set_pwrreg)
		host->ops->set_pwrreg(host, pwr);
	else
		mmci_write_pwrreg(host, pwr);

	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * mmc_host_ops ->get_cd: card-detect via GPIO descriptor, falling back to
 * the platform-data callback, else "always present".
 */
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

/*
 * mmc_host_ops ->start_signal_voltage_switch: set the vqmmc regulator to
 * the range matching the requested signalling voltage.
 */
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						    1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
	}

	return ret;
}

/* Host operations; not const because ->card_busy is patched in at probe
 * time for busy-detect variants */
static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

/*
 * Parse common MMC bindings, then the ST-specific signal-direction and
 * feedback-clock properties into host->pwr_reg_add, plus the (legacy)
 * highspeed capability properties.
 */
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}

/*
 * AMBA probe: allocate the mmc_host, set up pinctrl (for variants without
 * an open-drain bit), clocks, register mapping, frequency limits, caps,
 * IRQs and DMA, then register the host.
 */
static int mmci_probe(struct amba_device *dev,
		      const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/*
	 * Some variant (STM32) doesn't have opendrain bit, nevertheless
	 * pins can be set accordingly using pinctrl
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_default = pinctrl_lookup_state(host->pinctrl,
							  PINCTRL_STATE_DEFAULT);
		if (IS_ERR(host->pins_default)) {
			dev_err(mmc_dev(mmc), "Can't select default pins\n");
			ret = PTR_ERR(host->pins_default);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	if (variant->init)
		variant->init(host);

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 * on Qualcomm like controllers get the nearest minimum clock to 100Khz
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
				min(variant->f_max, mmc->f_max) :
				min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
				fmax : min(host->mclk, fmax);


	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/*
	 * Enable busy detection.
	 */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		/*
		 * Not all variants have a flag to enable busy detection
		 * in the DPSM, but if they do, set it here.
		 */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << variant->datactrl_blocksz;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;

	spin_lock_init(&host->lock);

	/* Mask and clear all interrupts before requesting the IRQ lines */
	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If:
	 * - not using DT but using a descriptor table, or
	 * - using a table of descriptors ALONGSIDE DT, or
	 * look up these descriptors named "cd" and "wp" right here, fail
	 * silently of these do not exist
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			       DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	mmc_add_host(mmc);

	/* Balanced by pm_runtime_get_sync() in mmci_remove() */
	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}

/*
 * AMBA remove: quiesce the controller, release DMA and clocks, and free
 * the host.
 */
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if
 (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		struct variant_data *variant = host->variant;

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		/* Mask all IRQs and stop any in-flight command/data */
		writel(0, host->base + MMCIMASK0);

		if (variant->mmcimask1)
			writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
/*
 * Quiesce the controller for runtime suspend: mask IRQs and, on variants
 * that lose register state without power, zero the shadowed registers.
 */
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Restore register state after runtime resume from the cached clk/datactrl/
 * pwr values and re-enable the standard IRQ mask.
 */
static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

/* Runtime PM suspend: save state, gate the clock, select sleep pins */
static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

/* Runtime PM resume: ungate the clock, restore state, select default pins */
static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

/* AMBA/PrimeCell peripheral ID match table, mapping each hardware
 * revision to its variant_data */
static const struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x10180180,
		.mask   = 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id     = 0x10480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{
		.id     = 0x00880180,
		.mask   = 0x00ffffff,
		.data	= &variant_stm32,
	},
	/* Qualcomm variants */
	{
		.id     = 0x00051180,
		.mask	= 0x000fffff,
		.data	= &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");