/*
 * drivers/mmc/host/omap_hsmmc.c
 *
 * Driver for OMAP2430/3430 MMC controller.
 *
 * Copyright (C) 2007 Texas Instruments.
 *
 * Authors:
 *	Syed Mohammed Khasim	<x0khasim@ti.com>
 *	Madhusudhan		<madhu.cr@ti.com>
 *	Mohit Jalori		<mjalori@ti.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/omap-dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/platform_data/hsmmc-omap.h>

/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSSTATUS	0x0014
#define OMAP_HSMMC_CON		0x002C
#define OMAP_HSMMC_SDMASA	0x0100
#define OMAP_HSMMC_BLK		0x0104
#define OMAP_HSMMC_ARG		0x0108
#define OMAP_HSMMC_CMD		0x010C
#define OMAP_HSMMC_RSP10	0x0110
#define OMAP_HSMMC_RSP32	0x0114
#define OMAP_HSMMC_RSP54	0x0118
#define OMAP_HSMMC_RSP76	0x011C
#define OMAP_HSMMC_DATA		0x0120
#define OMAP_HSMMC_PSTATE	0x0124
#define OMAP_HSMMC_HCTL		0x0128
#define OMAP_HSMMC_SYSCTL	0x012C
#define OMAP_HSMMC_STAT		0x0130
#define OMAP_HSMMC_IE		0x0134
#define OMAP_HSMMC_ISE		0x0138
#define OMAP_HSMMC_AC12		0x013C
#define OMAP_HSMMC_CAPA		0x0140

#define VS18			(1 << 26)
#define VS30			(1 << 25)
#define HSS			(1 << 21)
#define SDVS18			(0x5 << 9)
#define SDVS30			(0x6 << 9)
#define SDVS33			(0x7 << 9)
#define SDVS_MASK		0x00000E00
#define SDVSCLR			0xFFFFF1FF
#define SDVSDET			0x00000400
#define AUTOIDLE		0x1
#define SDBP			(1 << 8)
#define DTO			0xe
#define ICE			0x1
#define ICS			0x2
#define CEN			(1 << 2)
#define CLKD_MAX		0x3FF		/* max clock divisor: 1023 */
#define CLKD_MASK		0x0000FFC0
#define CLKD_SHIFT		6
#define DTO_MASK		0x000F0000
#define DTO_SHIFT		16
#define INIT_STREAM		(1 << 1)
#define ACEN_ACMD23		(2 << 2)
#define DP_SELECT		(1 << 21)
#define DDIR			(1 << 4)
#define DMAE			0x1
#define MSBS			(1 << 5)
#define BCE			(1 << 1)
#define FOUR_BIT		(1 << 1)
#define HSPE			(1 << 2)
#define IWE			(1 << 24)
#define DDR			(1 << 19)
#define CLKEXTFREE		(1 << 16)
#define CTPL			(1 << 11)
#define DW8			(1 << 5)
#define OD			0x1
#define STAT_CLEAR		0xFFFFFFFF
#define INIT_STREAM_CMD		0x00000000
#define DUAL_VOLT_OCR_BIT	7
#define SRC			(1 << 25)
#define SRD			(1 << 26)
#define SOFTRESET		(1 << 1)

/* PSTATE */
#define DLEV_DAT(x)		(1 << (20 + (x)))

/* Interrupt masks for IE and ISE register */
#define CC_EN			(1 << 0)
#define TC_EN			(1 << 1)
#define BWR_EN			(1 << 4)
#define BRR_EN			(1 << 5)
#define CIRQ_EN			(1 << 8)
#define ERR_EN			(1 << 15)
#define CTO_EN			(1 << 16)
#define CCRC_EN			(1 << 17)
#define CEB_EN			(1 << 18)
#define CIE_EN			(1 << 19)
#define DTO_EN			(1 << 20)
#define DCRC_EN			(1 << 21)
#define DEB_EN			(1 << 22)
#define ACE_EN			(1 << 24)
#define CERR_EN			(1 << 28)
#define BADA_EN			(1 << 29)

#define INT_EN_MASK	(BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
			 DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
			 BRR_EN | BWR_EN | TC_EN | CC_EN)

#define CNI	(1 << 7)
#define ACIE	(1 << 4)
#define ACEB	(1 << 3)
#define ACCE	(1 << 2)
#define ACTO	(1 << 1)
#define ACNE	(1 << 0)

#define MMC_AUTOSUSPEND_DELAY	100
#define MMC_TIMEOUT_MS		20		/* 20 ms */
#define MMC_TIMEOUT_US		20000		/* 20000 us */
#define OMAP_MMC_MIN_CLOCK	400000
#define OMAP_MMC_MAX_CLOCK	52000000
#define DRIVER_NAME		"omap_hsmmc"

#define VDD_1V8			1800000		/* 1800000 uV */
#define VDD_3V0			3000000		/* 3000000 uV */
#define VDD_165_195		(ffs(MMC_VDD_165_195) - 1)

/*
 * One controller can have multiple slots, like on some omap boards using
 * omap.c controller driver. Luckily this is not currently done on any known
 * omap_hsmmc.c device.
 */
#define mmc_pdata(host)		host->pdata

/*
 * MMC Host controller read/write API's
 */
#define OMAP_HSMMC_READ(base, reg)	\
	__raw_readl((base) + OMAP_HSMMC_##reg)

#define OMAP_HSMMC_WRITE(base, reg, val) \
	__raw_writel((val), (base) + OMAP_HSMMC_##reg)

struct omap_hsmmc_next {
	unsigned int	dma_len;
	s32		cookie;
};

struct omap_hsmmc_host {
	struct device		*dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	struct clk		*fclk;
	struct clk		*dbclk;
	struct regulator	*pbias;
	bool			pbias_enabled;
	void __iomem		*base;
	int			vqmmc_enabled;
	resource_size_t		mapbase;
	spinlock_t		irq_lock; /* Prevent races with irq handler */
	unsigned int		dma_len;
	unsigned int		dma_sg_idx;
	unsigned char		bus_mode;
	unsigned char		power_mode;
	int			suspended;
	u32			con;
	u32			hctl;
	u32			sysctl;
	u32			capa;
	int			irq;
	int			wake_irq;
	int			use_dma, dma_ch;
	struct dma_chan		*tx_chan;
	struct dma_chan		*rx_chan;
	int			response_busy;
	int			context_loss;
	int			protect_card;
	int			reqs_blocked;
	int			req_in_progress;
	unsigned long		clk_rate;
	unsigned int		flags;
#define AUTO_CMD23		(1 << 0)	/* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED	(1 << 1)	/* SDIO irq enabled */
	struct omap_hsmmc_next	next_data;
	struct omap_hsmmc_platform_data	*pdata;

	/* return MMC cover switch state, can be NULL if not supported.
	 *
	 * possible return values:
	 *   0 - closed
	 *   1 - open
	 */
	int (*get_cover_state)(struct device *dev);

	int (*card_detect)(struct device *dev);
};

struct omap_mmc_of_data {
	u32 reg_offset;
	u8 controller_flags;
};

static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);

static int omap_hsmmc_card_detect(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	return mmc_gpio_get_cd(host->mmc);
}

static int omap_hsmmc_get_cover_state(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	return mmc_gpio_get_cd(host->mmc);
}

static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
{
	int ret;
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	struct mmc_ios *ios = &mmc->ios;

	if (mmc->supply.vmmc) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (ret)
			return ret;
	}

	/* Enable interface voltage rail, if needed */
	if (mmc->supply.vqmmc && !host->vqmmc_enabled) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (ret) {
			dev_err(mmc_dev(mmc), "vmmc_aux reg enable failed\n");
			goto err_vqmmc;
		}
		host->vqmmc_enabled = 1;
	}

	return 0;

err_vqmmc:
	if (mmc->supply.vmmc)
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	return ret;
}

static int omap_hsmmc_disable_supply(struct mmc_host *mmc)
{
	int ret;
	int status;
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mmc->supply.vqmmc && host->vqmmc_enabled) {
		ret = regulator_disable(mmc->supply.vqmmc);
		if (ret) {
			dev_err(mmc_dev(mmc), "vmmc_aux reg disable failed\n");
			return ret;
		}
		host->vqmmc_enabled = 0;
	}

	if (mmc->supply.vmmc) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (ret)
			goto err_set_ocr;
	}

	return 0;

err_set_ocr:
	if (mmc->supply.vqmmc) {
		status = regulator_enable(mmc->supply.vqmmc);
		if (status)
			dev_err(mmc_dev(mmc), "vmmc_aux re-enable failed\n");
	}

	return ret;
}

static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
				int vdd)
{
	int ret;

	if (!host->pbias)
		return 0;

	if (power_on) {
		if (vdd <= VDD_165_195)
			ret = regulator_set_voltage(host->pbias, VDD_1V8,
						    VDD_1V8);
		else
			ret = regulator_set_voltage(host->pbias, VDD_3V0,
						    VDD_3V0);
		if (ret < 0) {
			dev_err(host->dev, "pbias set voltage fail\n");
			return ret;
		}

		if (host->pbias_enabled == 0) {
			ret = regulator_enable(host->pbias);
			if (ret) {
				dev_err(host->dev, "pbias reg enable fail\n");
				return ret;
			}
			host->pbias_enabled = 1;
		}
	} else {
		if (host->pbias_enabled == 1) {
			ret = regulator_disable(host->pbias);
			if (ret) {
				dev_err(host->dev, "pbias reg disable fail\n");
				return ret;
			}
			host->pbias_enabled = 0;
		}
	}

	return 0;
}

static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
{
	struct omap_hsmmc_host *host =
		platform_get_drvdata(to_platform_device(dev));
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (mmc_pdata(host)->set_power)
		return mmc_pdata(host)->set_power(dev, power_on, vdd);

	/*
	 * If we don't see a Vcc regulator, assume it's a fixed
	 * voltage always-on regulator.
367 */ 368 if (!mmc->supply.vmmc) 369 return 0; 370 371 if (mmc_pdata(host)->before_set_reg) 372 mmc_pdata(host)->before_set_reg(dev, power_on, vdd); 373 374 ret = omap_hsmmc_set_pbias(host, false, 0); 375 if (ret) 376 return ret; 377 378 /* 379 * Assume Vcc regulator is used only to power the card ... OMAP 380 * VDDS is used to power the pins, optionally with a transceiver to 381 * support cards using voltages other than VDDS (1.8V nominal). When a 382 * transceiver is used, DAT3..7 are muxed as transceiver control pins. 383 * 384 * In some cases this regulator won't support enable/disable; 385 * e.g. it's a fixed rail for a WLAN chip. 386 * 387 * In other cases vcc_aux switches interface power. Example, for 388 * eMMC cards it represents VccQ. Sometimes transceivers or SDIO 389 * chips/cards need an interface voltage rail too. 390 */ 391 if (power_on) { 392 ret = omap_hsmmc_enable_supply(mmc); 393 if (ret) 394 return ret; 395 396 ret = omap_hsmmc_set_pbias(host, true, vdd); 397 if (ret) 398 goto err_set_voltage; 399 } else { 400 ret = omap_hsmmc_disable_supply(mmc); 401 if (ret) 402 return ret; 403 } 404 405 if (mmc_pdata(host)->after_set_reg) 406 mmc_pdata(host)->after_set_reg(dev, power_on, vdd); 407 408 return 0; 409 410 err_set_voltage: 411 omap_hsmmc_disable_supply(mmc); 412 413 return ret; 414 } 415 416 static int omap_hsmmc_disable_boot_regulator(struct regulator *reg) 417 { 418 int ret; 419 420 if (!reg) 421 return 0; 422 423 if (regulator_is_enabled(reg)) { 424 ret = regulator_enable(reg); 425 if (ret) 426 return ret; 427 428 ret = regulator_disable(reg); 429 if (ret) 430 return ret; 431 } 432 433 return 0; 434 } 435 436 static int omap_hsmmc_disable_boot_regulators(struct omap_hsmmc_host *host) 437 { 438 struct mmc_host *mmc = host->mmc; 439 int ret; 440 441 /* 442 * disable regulators enabled during boot and get the usecount 443 * right so that regulators can be enabled/disabled by checking 444 * the return value of regulator_is_enabled 445 */ 446 ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vmmc); 447 if (ret) { 448 dev_err(host->dev, "fail to disable boot enabled vmmc reg\n"); 449 return ret; 450 } 451 452 ret = omap_hsmmc_disable_boot_regulator(mmc->supply.vqmmc); 453 if (ret) { 454 dev_err(host->dev, 455 "fail to disable boot enabled vmmc_aux reg\n"); 456 return ret; 457 } 458 459 ret = omap_hsmmc_disable_boot_regulator(host->pbias); 460 if (ret) { 461 dev_err(host->dev, 462 "failed to disable boot enabled pbias reg\n"); 463 return ret; 464 } 465 466 return 0; 467 } 468 469 static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) 470 { 471 int ocr_value = 0; 472 int ret; 473 struct mmc_host *mmc = host->mmc; 474 475 if (mmc_pdata(host)->set_power) 476 return 0; 477 478 mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); 479 if (IS_ERR(mmc->supply.vmmc)) { 480 ret = PTR_ERR(mmc->supply.vmmc); 481 if ((ret != -ENODEV) && host->dev->of_node) 482 return ret; 483 dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", 484 PTR_ERR(mmc->supply.vmmc)); 485 mmc->supply.vmmc = NULL; 486 } else { 487 ocr_value = mmc_regulator_get_ocrmask(mmc->supply.vmmc); 488 if (ocr_value > 0) 489 mmc_pdata(host)->ocr_mask = ocr_value; 490 } 491 492 /* Allow an aux regulator */ 493 mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); 494 if (IS_ERR(mmc->supply.vqmmc)) { 495 ret = PTR_ERR(mmc->supply.vqmmc); 496 if ((ret != -ENODEV) && host->dev->of_node) 497 return ret; 498 dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", 499 
			PTR_ERR(mmc->supply.vqmmc));
		mmc->supply.vqmmc = NULL;
	}

	host->pbias = devm_regulator_get_optional(host->dev, "pbias");
	if (IS_ERR(host->pbias)) {
		ret = PTR_ERR(host->pbias);
		if ((ret != -ENODEV) && host->dev->of_node)
			return ret;
		dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
			PTR_ERR(host->pbias));
		host->pbias = NULL;
	}

	/* For eMMC do not power off when not in sleep state */
	if (mmc_pdata(host)->no_regulator_off_init)
		return 0;

	ret = omap_hsmmc_disable_boot_regulators(host);
	if (ret)
		return ret;

	return 0;
}

static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);

static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
				struct omap_hsmmc_host *host,
				struct omap_hsmmc_platform_data *pdata)
{
	int ret;

	if (gpio_is_valid(pdata->gpio_cod)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cod, 0);
		if (ret)
			return ret;

		host->get_cover_state = omap_hsmmc_get_cover_state;
		mmc_gpio_set_cd_isr(mmc, omap_hsmmc_cover_irq);
	} else if (gpio_is_valid(pdata->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, pdata->gpio_cd, 0);
		if (ret)
			return ret;

		host->card_detect = omap_hsmmc_card_detect;
	}

	if (gpio_is_valid(pdata->gpio_wp)) {
		ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Start clock to the card
 */
static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
}

/*
 * Stop clock to the card
 */
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
	if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
}

static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
				  struct mmc_command *cmd)
{
	u32 irq_mask = INT_EN_MASK;
	unsigned long flags;

	if (host->use_dma)
		irq_mask &= ~(BRR_EN | BWR_EN);

	/* Disable timeout for erases */
	if (cmd->opcode == MMC_ERASE)
		irq_mask &= ~DTO_EN;

	spin_lock_irqsave(&host->irq_lock, flags);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* latch pending CIRQ, but don't signal MMC core */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
	u32 irq_mask = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	/* no transfer running but need to keep cirq if enabled */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

/* Calculate divisor for the given clock frequency */
static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
{
	u16 dsor = 0;

	if (ios->clock) {
		dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
		if (dsor > CLKD_MAX)
			dsor = CLKD_MAX;
	}

	return dsor;
}

static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned long regval;
	unsigned long timeout;
	unsigned long clkdiv;

	dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);

	omap_hsmmc_stop_clock(host);

	regval = OMAP_HSMMC_READ(host->base, SYSCTL);
	regval = regval & ~(CLKD_MASK | DTO_MASK);
	clkdiv = calc_divisor(host, ios);
	regval = regval | (clkdiv << 6) | (DTO << 16);
	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);

	/* Wait till the ICS bit is set */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
		&& time_before(jiffies, timeout))
		cpu_relax();

	/*
	 * Enable High-Speed Support
	 * Pre-Requisites
	 *	- Controller should support High-Speed-Enable Bit
	 *	- Controller should not be using DDR Mode
	 *	- Controller should advertise that it supports High Speed
	 *	  in capabilities register
	 *	- MMC/SD clock coming out of controller > 25MHz
	 */
	if ((mmc_pdata(host)->features & HSMMC_HAS_HSPE_SUPPORT) &&
	    (ios->timing != MMC_TIMING_MMC_DDR52) &&
	    (ios->timing != MMC_TIMING_UHS_DDR50) &&
	    ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
		regval = OMAP_HSMMC_READ(host->base, HCTL);
		if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
			regval |= HSPE;
		else
			regval &= ~HSPE;

		OMAP_HSMMC_WRITE(host->base, HCTL, regval);
	}

	omap_hsmmc_start_clock(host);
}

static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50)
		con |= DDR;	/* configure in DDR mode */
	else
		con &= ~DDR;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
		break;
	case MMC_BUS_WIDTH_4:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
		break;
	case MMC_BUS_WIDTH_1:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
		break;
	}
}

static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		OMAP_HSMMC_WRITE(host->base, CON, con | OD);
	else
		OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
}

#ifdef CONFIG_PM

/*
 * Restore the MMC host context, if it was lost as result of a
 * power state change.
 */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 hctl, capa;
	unsigned long timeout;

	if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
	    host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
	    host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
	    host->capa == OMAP_HSMMC_READ(host->base, CAPA))
		return 0;

	host->context_loss++;

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		if (host->power_mode != MMC_POWER_OFF &&
		    (1 << ios->vdd) <= MMC_VDD_23_24)
			hctl = SDVS18;
		else
			hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		hctl |= IWE;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | hctl);

	OMAP_HSMMC_WRITE(host->base, CAPA,
			OMAP_HSMMC_READ(host->base, CAPA) | capa);

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
		&& time_before(jiffies, timeout))
		;

	OMAP_HSMMC_WRITE(host->base, ISE, 0);
	OMAP_HSMMC_WRITE(host->base, IE, 0);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);

	/* Do not initialize card-specific things if the power is off */
	if (host->power_mode == MMC_POWER_OFF)
		goto out;

	omap_hsmmc_set_bus_width(host);

	omap_hsmmc_set_clock(host);

	omap_hsmmc_set_bus_mode(host);

out:
	dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
		host->context_loss);
	return 0;
}

/*
 * Save the MMC host context (store the number of power state changes so far).
 */
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
	host->con = OMAP_HSMMC_READ(host->base, CON);
	host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
	host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
	host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}

#else

static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	return 0;
}

static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

#endif

/*
 * Send init stream sequence to card
 * before sending IDLE command
 */
static void send_init_stream(struct omap_hsmmc_host *host)
{
	int reg = 0;
	unsigned long timeout;

	if (host->protect_card)
		return;

	disable_irq(host->irq);

	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((reg != CC_EN) && time_before(jiffies, timeout))
		reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;

	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);

	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_READ(host->base, STAT);

	enable_irq(host->irq);
}

static inline
int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
{
	int r = 1;

	if (host->get_cover_state)
		r = host->get_cover_state(host->dev);
	return r;
}

static ssize_t
omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	return sprintf(buf, "%s\n",
			omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);

static ssize_t
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	return sprintf(buf, "%s\n", mmc_pdata(host)->name);
}

static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);

/*
 * Configure the response type and send the cmd.
 */
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
			 struct mmc_data *data)
{
	int cmdreg = 0, resptype = 0, cmdtype = 0;

	dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
	host->cmd = cmd;

	omap_hsmmc_enable_irq(host, cmd);

	host->response_busy = 0;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			resptype = 1;
		else if (cmd->flags & MMC_RSP_BUSY) {
			resptype = 3;
			host->response_busy = 1;
		} else
			resptype = 2;
	}

	/*
	 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
	 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
	 * a val of 0x3, rest 0x0.
	 */
	if (cmd == host->mrq->stop)
		cmdtype = 0x3;

	cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

	if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
	    host->mrq->sbc) {
		cmdreg |= ACEN_ACMD23;
		OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
	}
	if (data) {
		cmdreg |= DP_SELECT | MSBS | BCE;
		if (data->flags & MMC_DATA_READ)
			cmdreg |= DDIR;
		else
			cmdreg &= ~(DDIR);
	}

	if (host->use_dma)
		cmdreg |= DMAE;

	host->req_in_progress = 1;

	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}

static int
omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
	struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
	int dma_ch;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	host->req_in_progress = 0;
	dma_ch = host->dma_ch;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	omap_hsmmc_disable_irq(host);
	/* Do not complete the request if DMA is still in progress */
	if (mrq->data && host->use_dma && dma_ch != -1)
		return;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
}

/*
 * Notify the transfer complete to MMC core
 */
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	if (!data) {
		struct mmc_request *mrq = host->mrq;

		/* TC before CC from CMD6 - don't know why, but it happens */
		if (host->cmd && host->cmd->opcode == 6 &&
		    host->response_busy) {
			host->response_busy = 0;
			return;
		}

		omap_hsmmc_request_done(host, mrq);
		return;
	}

	host->data = NULL;

	if (!data->error)
		data->bytes_xfered += data->blocks * (data->blksz);
	else
		data->bytes_xfered = 0;

	if (data->stop && (data->error || !host->mrq->sbc))
		omap_hsmmc_start_command(host, data->stop, NULL);
	else
		omap_hsmmc_request_done(host, data->mrq);
}

/*
 * Notify the core about command completion
 */
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
	if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
	    !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
		host->cmd = NULL;
		omap_hsmmc_start_dma_transfer(host);
		omap_hsmmc_start_command(host, host->mrq->cmd,
						host->mrq->data);
		return;
	}

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
			cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
			cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
		}
	}
	if ((host->data == NULL && !host->response_busy) || cmd->error)
		omap_hsmmc_request_done(host, host->mrq);
}

/*
 * DMA clean up for command errors
 */
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
	int dma_ch;
	unsigned long flags;

	host->data->error = errno;

	spin_lock_irqsave(&host->irq_lock, flags);
	dma_ch = host->dma_ch;
	host->dma_ch = -1;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	if (host->use_dma && dma_ch != -1) {
		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

		dmaengine_terminate_all(chan);
		dma_unmap_sg(chan->device->dev,
			host->data->sg, host->data->sg_len,
			omap_hsmmc_get_dma_dir(host, host->data));

		host->data->host_cookie = 0;
	}
	host->data = NULL;
}

/*
 * Readable error output
 */
#ifdef CONFIG_MMC_DEBUG
static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
	/* --- means reserved bit without definition at documentation */
	static const char *omap_hsmmc_status_bits[] = {
		"CC"  , "TC"  , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
		"CIRQ",	"OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
		"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
		"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
	};
	char res[256];
	char *buf = res;
	int len, i;

	len = sprintf(buf, "MMC IRQ 0x%x :", status);
	buf += len;

	for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
		if (status & (1 << i)) {
			len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
			buf += len;
		}

	dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
#else
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
					     u32 status)
{
}
#endif  /* CONFIG_MMC_DEBUG */

/*
 * MMC controller internal state machines reset
 *
 * Used to reset command or data internal state machines, using respectively
 * SRC or SRD bit of SYSCTL register
 * Can be called from interrupt context
 */
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
						   unsigned long bit)
{
	unsigned long i = 0;
	unsigned long limit = MMC_TIMEOUT_US;

	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

	/*
	 * OMAP4 ES2 and greater has an updated reset logic.
	 * Monitor a 0->1 transition first
	 */
	if (mmc_pdata(host)->features & HSMMC_HAS_UPDATED_RESET) {
		while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
					&& (i++ < limit))
			udelay(1);
	}
	i = 0;

	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
		(i++ < limit))
		udelay(1);

	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
		dev_err(mmc_dev(host->mmc),
			"Timeout waiting on controller reset in %s\n",
			__func__);
}

static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
					int err, int end_cmd)
{
	if (end_cmd) {
		omap_hsmmc_reset_controller_fsm(host, SRC);
		if (host->cmd)
			host->cmd->error = err;
	}

	if (host->data) {
		omap_hsmmc_reset_controller_fsm(host, SRD);
		omap_hsmmc_dma_cleanup(host, err);
	} else if (host->mrq && host->mrq->cmd)
		host->mrq->cmd->error = err;
}

static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
	struct mmc_data *data;
	int end_cmd = 0, end_trans = 0;
	int error = 0;

	data = host->data;
	dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);

	if (status & ERR_EN) {
		omap_hsmmc_dbg_report_irq(host, status);

		if (status & (CTO_EN | CCRC_EN))
			end_cmd = 1;
		if (host->data || host->response_busy) {
			end_trans = !end_cmd;
			host->response_busy = 0;
		}
		if (status & (CTO_EN | DTO_EN))
			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
		else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
				   BADA_EN))
			hsmmc_command_incomplete(host, -EILSEQ, end_cmd);

		if (status & ACE_EN) {
			u32 ac12;
			ac12 = OMAP_HSMMC_READ(host->base, AC12);
			if (!(ac12 & ACNE) && host->mrq->sbc) {
				end_cmd = 1;
				if (ac12 & ACTO)
					error =	-ETIMEDOUT;
				else if (ac12 & (ACCE | ACEB | ACIE))
					error = -EILSEQ;
				host->mrq->sbc->error = error;
				hsmmc_command_incomplete(host, error, end_cmd);
			}
			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
		}
	}

	OMAP_HSMMC_WRITE(host->base, STAT, status);
	if (end_cmd || ((status & CC_EN) && host->cmd))
		omap_hsmmc_cmd_done(host, host->cmd);
	if ((end_trans || (status & TC_EN)) && host->mrq)
		omap_hsmmc_xfer_done(host, data);
}

/*
 * MMC controller IRQ handler
 */
static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	int status;

	status = OMAP_HSMMC_READ(host->base, STAT);
	while (status & (INT_EN_MASK | CIRQ_EN)) {
		if (host->req_in_progress)
			omap_hsmmc_do_irq(host, status);

		if (status & CIRQ_EN)
			mmc_signal_sdio_irq(host->mmc);

		/* Flush posted write */
		status = OMAP_HSMMC_READ(host->base, STAT);
	}

	return IRQ_HANDLED;
}

static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
	unsigned long i;

	OMAP_HSMMC_WRITE(host->base, HCTL,
		OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
	for (i = 0; i < loops_per_jiffy; i++) {
		if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
			break;
		cpu_relax();
	}
}

/*
 * Switch MMC interface voltage ... only relevant for MMC1.
 *
 * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
 * Some chips, like eMMC ones, use internal transceivers.
 */
static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
{
	u32 reg_val = 0;
	int ret;

	/* Disable the clocks */
	pm_runtime_put_sync(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	/* Turn the power off */
	ret = omap_hsmmc_set_power(host->dev, 0, 0);

	/* Turn the power ON with given VDD 1.8 or 3.0v */
	if (!ret)
		ret = omap_hsmmc_set_power(host->dev, 1, vdd);
	pm_runtime_get_sync(host->dev);
	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	if (ret != 0)
		goto err;

	OMAP_HSMMC_WRITE(host->base, HCTL,
		OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
	reg_val = OMAP_HSMMC_READ(host->base, HCTL);

	/*
	 * If a MMC dual voltage card is detected, the set_ios fn calls
	 * this fn with VDD bit set for 1.8V. Upon card removal from the
	 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
	 *
	 * Cope with a bit of slop in the range ... per data sheets:
	 *  - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
	 *    but recommended values are 1.71V to 1.89V
	 *  - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
	 *    but recommended values are 2.7V to 3.3V
	 *
	 * Board setup code shouldn't permit anything very out-of-range.
	 * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
	 * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
	 */
	if ((1 << vdd) <= MMC_VDD_23_24)
		reg_val |= SDVS18;
	else
		reg_val |= SDVS30;

	OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
	set_sd_bus_power(host);

	return 0;
err:
	dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
	return ret;
}

/* Protect the card while the cover is open */
static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
{
	if (!host->get_cover_state)
		return;

	host->reqs_blocked = 0;
	if (host->get_cover_state(host->dev)) {
		if (host->protect_card) {
			dev_info(host->dev, "%s: cover is closed, "
					 "card is now accessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 0;
		}
	} else {
		if (!host->protect_card) {
			dev_info(host->dev, "%s: cover is open, "
					 "card is now inaccessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 1;
		}
	}
}

/*
 * irq handler when (cell-phone) cover is mounted/removed
 */
static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;

	sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");

	omap_hsmmc_protect_card(host);
	mmc_detect_change(host->mmc, (HZ * 200) / 1000);
	return IRQ_HANDLED;
}

static void omap_hsmmc_dma_callback(void *param)
{
	struct omap_hsmmc_host *host = param;
	struct dma_chan *chan;
	struct mmc_data *data;
	int req_in_progress;

	spin_lock_irq(&host->irq_lock);
	if (host->dma_ch < 0) {
		spin_unlock_irq(&host->irq_lock);
		return;
	}

	data = host->mrq->data;
	chan = omap_hsmmc_get_dma_chan(host, data);
	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev,
			     data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));

	req_in_progress = host->req_in_progress;
	host->dma_ch = -1;
	spin_unlock_irq(&host->irq_lock);

	/* If DMA has finished after TC, complete the request */
	if (!req_in_progress) {
		struct mmc_request *mrq = host->mrq;

		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
	}
}

static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
				       struct mmc_data *data,
				       struct omap_hsmmc_next *next,
				       struct dma_chan *chan)
{
	int dma_len;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
	if (next || data->host_cookie != host->next_data.cookie) {
		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
				     omap_hsmmc_get_dma_dir(host, data));

	} else {
		dma_len = host->next_data.dma_len;
		host->next_data.dma_len = 0;
	}

	if (dma_len == 0)
		return -EINVAL;

	if (next) {
		next->dma_len = dma_len;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->dma_len = dma_len;

	return 0;
}

/*
 * Routine to configure and start DMA for the MMC card
 */
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
					struct mmc_request *req)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *tx;
	int ret = 0, i;
	struct mmc_data *data = req->data;
	struct dma_chan *chan;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		struct scatterlist *sgl;

		sgl = data->sg + i;
		if (sgl->length % data->blksz)
			return -EINVAL;
	}
	if ((data->blksz % 4) != 0)
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		return -EINVAL;

	BUG_ON(host->dma_ch != -1);

	chan = omap_hsmmc_get_dma_chan(host, data);

	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		/* FIXME: cleanup */
		return -1;
	}

	tx->callback = omap_hsmmc_dma_callback;
	tx->callback_param = host;

	/* Does not fail */
	dmaengine_submit(tx);

	host->dma_ch = 1;

	return 0;
}

static void set_data_timeout(struct omap_hsmmc_host *host,
			     unsigned int timeout_ns,
			     unsigned int timeout_clks)
{
	unsigned int timeout, cycle_ns;
	uint32_t reg, clkd, dto = 0;

	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
	if (clkd == 0)
		clkd = 1;

	cycle_ns = 1000000000 / (host->clk_rate / clkd);
	timeout = timeout_ns / cycle_ns;
	timeout += timeout_clks;
	/* pick the smallest DTO field value whose 2^(DTO + 13) clock cycles cover the request */
	if (timeout) {
		while ((timeout & 0x80000000) == 0) {
			dto += 1;
			timeout <<= 1;
		}
		dto = 31 - dto;
		timeout <<= 1;
		if (timeout && dto)
			dto += 1;
		if (dto >= 13)
			dto -= 13;
		else
			dto = 0;
		if (dto > 14)
			dto = 14;
	}

	reg &= ~DTO_MASK;
	reg |= dto << DTO_SHIFT;
	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}

static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
{
	struct mmc_request *req = host->mrq;
	struct dma_chan *chan;

	if (!req->data)
		return;
	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
				| (req->data->blocks << 16));
	set_data_timeout(host, req->data->timeout_ns,
				req->data->timeout_clks);
	chan = omap_hsmmc_get_dma_chan(host, req->data);
	dma_async_issue_pending(chan);
}

/*
 * Configure block length for MMC/SD cards and initiate the transfer.
 */
static int
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
	int ret;
	host->data = req->data;

	if (req->data == NULL) {
		OMAP_HSMMC_WRITE(host->base, BLK, 0);
		/*
		 * Set an arbitrary 100ms data timeout for commands with
		 * busy signal.
		 */
		if (req->cmd->flags & MMC_RSP_BUSY)
			set_data_timeout(host, 100000000U, 0);
		return 0;
	}

	if (host->use_dma) {
		ret = omap_hsmmc_setup_dma_transfer(host, req);
		if (ret != 0) {
			dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
			return ret;
		}
	}
	return 0;
}

static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->use_dma && data->host_cookie) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);

		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));
		data->host_cookie = 0;
	}
}

static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return;
	}

	if (host->use_dma) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);

		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
						&host->next_data, c))
			mrq->data->host_cookie = 0;
	}
}

/*
 * Request function for read/write operation
 */
static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	int err;

	BUG_ON(host->req_in_progress);
	BUG_ON(host->dma_ch != -1);
	pm_runtime_get_sync(host->dev);
	if (host->protect_card) {
		if (host->reqs_blocked < 3) {
			/*
			 * Ensure the controller is left in a consistent
			 * state by resetting the command and data state
			 * machines.
			 */
			omap_hsmmc_reset_controller_fsm(host, SRD);
			omap_hsmmc_reset_controller_fsm(host, SRC);
			host->reqs_blocked += 1;
		}
		req->cmd->error = -EBADF;
		if (req->data)
			req->data->error = -EBADF;
		req->cmd->retries = 0;
		mmc_request_done(mmc, req);
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
		return;
	} else if (host->reqs_blocked)
		host->reqs_blocked = 0;
	WARN_ON(host->mrq != NULL);
	host->mrq = req;
	host->clk_rate = clk_get_rate(host->fclk);
	err = omap_hsmmc_prepare_data(host, req);
	if (err) {
		req->cmd->error = err;
		if (req->data)
			req->data->error = err;
		host->mrq = NULL;
		mmc_request_done(mmc, req);
		pm_runtime_mark_last_busy(host->dev);
		pm_runtime_put_autosuspend(host->dev);
		return;
	}
	if (req->sbc && !(host->flags & AUTO_CMD23)) {
		omap_hsmmc_start_command(host, req->sbc, NULL);
		return;
	}

	omap_hsmmc_start_dma_transfer(host);
	omap_hsmmc_start_command(host, req->cmd, req->data);
}

/* Routine to configure clock values. Exposed API to core */
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	int do_send_init_stream = 0;

	pm_runtime_get_sync(host->dev);

	if (ios->power_mode != host->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			omap_hsmmc_set_power(host->dev, 0, 0);
			break;
		case MMC_POWER_UP:
			omap_hsmmc_set_power(host->dev, 1, ios->vdd);
			break;
		case MMC_POWER_ON:
			do_send_init_stream = 1;
			break;
		}
		host->power_mode = ios->power_mode;
	}

	/* FIXME: set registers based only on changes to ios */

	omap_hsmmc_set_bus_width(host);

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		/* Only MMC1 can interface at 3V without some flavor
		 * of external transceiver; but they all handle 1.8V.
		 */
		if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
			(ios->vdd == DUAL_VOLT_OCR_BIT)) {
			/*
			 * The mmc_select_voltage fn of the core does
			 * not seem to set the power_mode to
			 * MMC_POWER_UP upon recalculating the voltage.
			 * vdd 1.8v.
			 */
			if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
				dev_dbg(mmc_dev(host->mmc),
						"Switch operation failed\n");
		}
	}

	omap_hsmmc_set_clock(host);

	if (do_send_init_stream)
		send_init_stream(host);

	omap_hsmmc_set_bus_mode(host);

	pm_runtime_put_autosuspend(host->dev);
}

static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (!host->card_detect)
		return -ENOSYS;
	return host->card_detect(host->dev);
}

static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mmc_pdata(host)->init_card)
		mmc_pdata(host)->init_card(card);
}

static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	u32 irq_mask, con;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	con = OMAP_HSMMC_READ(host->base, CON);
	irq_mask = OMAP_HSMMC_READ(host->base, ISE);
	if (enable) {
		host->flags |= HSMMC_SDIO_IRQ_ENABLED;
		irq_mask |= CIRQ_EN;
		con |= CTPL | CLKEXTFREE;
	} else {
		host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
		irq_mask &= ~CIRQ_EN;
		con &= ~(CTPL | CLKEXTFREE);
	}
	OMAP_HSMMC_WRITE(host->base, CON, con);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);

	/*
	 * if enable, piggy back detection on current request
	 * but always disable immediately
	 */
	if (!host->req_in_progress || !enable)
		OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* flush posted write */
	OMAP_HSMMC_READ(host->base, IE);

	spin_unlock_irqrestore(&host->irq_lock, flags);
}

static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
	int ret;

	/*
	 * For omaps with wake-up path, wakeirq will be irq from pinctrl and
	 * for other omaps, wakeirq will be from GPIO (dat line remuxed to
	 * gpio). wakeirq is needed to detect sdio irq in runtime suspend state
	 * with functional clock disabled.
	 */
	if (!host->dev->of_node || !host->wake_irq)
		return -ENODEV;

	ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
		goto err;
	}

	/*
	 * Some omaps don't have wake-up path from deeper idle states
	 * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
	 */
	if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
		/* devm_pinctrl_get() returns an ERR_PTR() on failure, not NULL */
		struct pinctrl *p = devm_pinctrl_get(host->dev);

		if (IS_ERR(p)) {
			ret = PTR_ERR(p);
			goto err_free_irq;
		}
		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
			dev_info(host->dev, "missing default pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}

		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
			dev_info(host->dev, "missing idle pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}
		devm_pinctrl_put(p);
	}

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
	return 0;

err_free_irq:
	dev_pm_clear_wake_irq(host->dev);
err:
	dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
	host->wake_irq = 0;
	return ret;
}

static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
{
	u32 hctl, capa, value;

	/* Only MMC1 supports 3.0V */
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
	OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);

	value = OMAP_HSMMC_READ(host->base, CAPA);
	OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);

	/* Set SD bus power bit */
	set_sd_bus_power(host);
}

static int omap_hsmmc_multi_io_quirk(struct mmc_card *card,
				     unsigned int direction, int blk_size)
{
	/* This controller can't do multiblock reads due to hw bugs */
	if (direction == MMC_DATA_READ)
		return 1;

	return blk_size;
}

static struct mmc_host_ops omap_hsmmc_ops = {
	.post_req = omap_hsmmc_post_req,
	.pre_req = omap_hsmmc_pre_req,
	.request = omap_hsmmc_request,
	.set_ios = omap_hsmmc_set_ios,
	.get_cd = omap_hsmmc_get_cd,
	.get_ro = mmc_gpio_get_ro,
	.init_card = omap_hsmmc_init_card,
	.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
};

#ifdef CONFIG_DEBUG_FS

static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
	struct mmc_host *mmc = s->private;
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	seq_printf(s, "mmc%d:\n", mmc->index);
	seq_printf(s, "sdio irq mode\t%s\n",
		   (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");

	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		seq_printf(s, "sdio irq \t%s\n",
			   (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled"
								  : "disabled");
	}
	seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);

	pm_runtime_get_sync(host->dev);
	seq_puts(s, "\nregs:\n");
	seq_printf(s, "CON:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CON));
	seq_printf(s, "PSTATE:\t\t0x%08x\n",
		   OMAP_HSMMC_READ(host->base, PSTATE));
	seq_printf(s, "HCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, HCTL));
	seq_printf(s, "SYSCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, SYSCTL));
	seq_printf(s, "IE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, IE));
	seq_printf(s, "ISE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, ISE));
	seq_printf(s, "CAPA:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CAPA));

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;
}

static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, omap_hsmmc_regs_show, inode->i_private);
}

static const struct file_operations mmc_regs_fops = {
	.open		= omap_hsmmc_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
	if (mmc->debugfs_root)
		debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
			mmc, &mmc_regs_fops);
}

#else

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}

#endif

#ifdef CONFIG_OF
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
	/* See 35xx errata 2.1.1.128 in SPRZ278F */
	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};

static const struct omap_mmc_of_data omap4_mmc_of_data = {
	.reg_offset = 0x100,
};

static const struct omap_mmc_of_data am33xx_mmc_of_data = {
	.reg_offset = 0x100,
	.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};

static const struct of_device_id omap_mmc_of_match[] = {
	{
		.compatible = "ti,omap2-hsmmc",
	},
	{
		.compatible = "ti,omap3-pre-es3-hsmmc",
		.data = &omap3_pre_es3_mmc_of_data,
	},
	{
		.compatible = "ti,omap3-hsmmc",
	},
	{
		.compatible = "ti,omap4-hsmmc",
		.data = &omap4_mmc_of_data,
	},
	{
		.compatible = "ti,am33xx-hsmmc",
		.data = &am33xx_mmc_of_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);

static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
{
	struct omap_hsmmc_platform_data *pdata;
	struct device_node *np = dev->of_node;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM); /* out of memory */

	if (of_find_property(np, "ti,dual-volt", NULL))
		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;

	pdata->gpio_cd = -EINVAL;
	pdata->gpio_cod = -EINVAL;
	pdata->gpio_wp = -EINVAL;

	if (of_find_property(np, "ti,non-removable", NULL)) {
		pdata->nonremovable = true;
		pdata->no_regulator_off_init = true;
	}

	if (of_find_property(np, "ti,needs-special-reset", NULL))
		pdata->features |= HSMMC_HAS_UPDATED_RESET;

	if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
		pdata->features |= HSMMC_HAS_HSPE_SUPPORT;

	return pdata;
}
#else
static inline struct omap_hsmmc_platform_data
			*of_get_hsmmc_pdata(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int omap_hsmmc_probe(struct platform_device *pdev)
{
	struct omap_hsmmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_host *mmc;
	struct omap_hsmmc_host *host = NULL;
	struct resource *res;
	int ret, irq;
	const struct of_device_id *match;
	dma_cap_mask_t mask;
	unsigned tx_req, rx_req;
	const struct omap_mmc_of_data *data;
	void __iomem *base;

	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
	if (match) {
		pdata = of_get_hsmmc_pdata(&pdev->dev);

		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		if (match->data) {
			data = match->data;
			pdata->reg_offset = data->reg_offset;
			pdata->controller_flags |= data->controller_flags;
		}
	}

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err1;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdata = pdata;
	host->dev = &pdev->dev;
	host->use_dma = 1;
	host->dma_ch = -1;
	host->irq = irq;
	host->mapbase = res->start + pdata->reg_offset;
	host->base = base + pdata->reg_offset;
	host->power_mode = MMC_POWER_OFF;
	host->next_data.cookie = 1;
	host->pbias_enabled = 0;
	host->vqmmc_enabled = 0;

	ret = omap_hsmmc_gpio_init(mmc, host, pdata);
	if (ret)
		goto err_gpio;

	platform_set_drvdata(pdev, host);

	if (pdev->dev.of_node)
		host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);

	mmc->ops = &omap_hsmmc_ops;

	mmc->f_min = OMAP_MMC_MIN_CLOCK;

	if (pdata->max_freq > 0)
		mmc->f_max = pdata->max_freq;
	else if (mmc->f_max == 0)
		mmc->f_max = OMAP_MMC_MAX_CLOCK;

	spin_lock_init(&host->irq_lock);

	host->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		goto err1;
	}

	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
		omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
	}

	device_init_wakeup(&pdev->dev, true);
	pm_runtime_enable(host->dev);
	pm_runtime_get_sync(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);

	omap_hsmmc_context_save(host);

	host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
	/*
	 * MMC can still work without debounce clock.
	 */
	if (IS_ERR(host->dbclk)) {
		host->dbclk = NULL;
	} else if (clk_prepare_enable(host->dbclk) != 0) {
		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
		host->dbclk = NULL;
	}

	/* Since we do only SG emulation, we can have as many segs
	 * as we want.

        omap_hsmmc_context_save(host);

        host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
        /*
         * MMC can still work without debounce clock.
         */
        if (IS_ERR(host->dbclk)) {
                host->dbclk = NULL;
        } else if (clk_prepare_enable(host->dbclk) != 0) {
                dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
                host->dbclk = NULL;
        }

        /* Since we do only SG emulation, we can have as many segs
         * as we want. */
        mmc->max_segs = 1024;

        mmc->max_blk_size = 512;     /* controller supports block lengths up to 1024; use 512 */
        mmc->max_blk_count = 0xFFFF; /* number of blocks is a 16-bit field */
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
        mmc->max_seg_size = mmc->max_req_size;

        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;

        mmc->caps |= mmc_pdata(host)->caps;
        if (mmc->caps & MMC_CAP_8_BIT_DATA)
                mmc->caps |= MMC_CAP_4_BIT_DATA;

        if (mmc_pdata(host)->nonremovable)
                mmc->caps |= MMC_CAP_NONREMOVABLE;

        mmc->pm_caps |= mmc_pdata(host)->pm_caps;

        omap_hsmmc_conf_bus_power(host);

        if (!pdev->dev.of_node) {
                res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
                if (!res) {
                        dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
                        ret = -ENXIO;
                        goto err_irq;
                }
                tx_req = res->start;

                res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
                if (!res) {
                        dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
                        ret = -ENXIO;
                        goto err_irq;
                }
                rx_req = res->start;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        host->rx_chan =
                dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
                                                 &rx_req, &pdev->dev, "rx");

        if (!host->rx_chan) {
                dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
                ret = -ENXIO;
                goto err_irq;
        }

        host->tx_chan =
                dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
                                                 &tx_req, &pdev->dev, "tx");

        if (!host->tx_chan) {
                dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
                ret = -ENXIO;
                goto err_irq;
        }
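
        /*
         * Both channels above are requested through
         * dma_request_slave_channel_compat(): on DT boots the "tx"/"rx"
         * names are resolved through the dmaengine bindings of the MMC
         * node, while legacy board-file boots fall back to
         * omap_dma_filter_fn() with the DMA request lines taken from the
         * platform resources parsed above.
         */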

        /* Request IRQ for MMC operations */
        ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
                        mmc_hostname(mmc), host);
        if (ret) {
                dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
                goto err_irq;
        }

        ret = omap_hsmmc_reg_get(host);
        if (ret)
                goto err_irq;

        mmc->ocr_avail = mmc_pdata(host)->ocr_mask;

        omap_hsmmc_disable_irq(host);

        /*
         * For now, only support SDIO interrupts if we have a separate
         * wake-up interrupt configured from device tree. This is because
         * the wake-up interrupt is needed for idle state and some
         * platforms need special quirks. And we don't want to add new
         * legacy mux platform init code callbacks any longer as we
         * are moving to DT based booting anyway.
         */
        ret = omap_hsmmc_configure_wake_irq(host);
        if (!ret)
                mmc->caps |= MMC_CAP_SDIO_IRQ;

        omap_hsmmc_protect_card(host);

        mmc_add_host(mmc);

        if (mmc_pdata(host)->name != NULL) {
                ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
                if (ret < 0)
                        goto err_slot_name;
        }
        if (host->get_cover_state) {
                ret = device_create_file(&mmc->class_dev,
                                         &dev_attr_cover_switch);
                if (ret < 0)
                        goto err_slot_name;
        }

        omap_hsmmc_debugfs(mmc);
        pm_runtime_mark_last_busy(host->dev);
        pm_runtime_put_autosuspend(host->dev);

        return 0;

err_slot_name:
        mmc_remove_host(mmc);
err_irq:
        device_init_wakeup(&pdev->dev, false);
        if (host->tx_chan)
                dma_release_channel(host->tx_chan);
        if (host->rx_chan)
                dma_release_channel(host->rx_chan);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        if (host->dbclk)
                clk_disable_unprepare(host->dbclk);
err1:
err_gpio:
        mmc_free_host(mmc);
err:
        return ret;
}

static int omap_hsmmc_remove(struct platform_device *pdev)
{
        struct omap_hsmmc_host *host = platform_get_drvdata(pdev);

        pm_runtime_get_sync(host->dev);
        mmc_remove_host(host->mmc);

        if (host->tx_chan)
                dma_release_channel(host->tx_chan);
        if (host->rx_chan)
                dma_release_channel(host->rx_chan);

        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
        device_init_wakeup(&pdev->dev, false);
        if (host->dbclk)
                clk_disable_unprepare(host->dbclk);

        mmc_free_host(host->mmc);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_hsmmc_suspend(struct device *dev)
{
        struct omap_hsmmc_host *host = dev_get_drvdata(dev);

        if (!host)
                return 0;

        pm_runtime_get_sync(host->dev);

        if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
                OMAP_HSMMC_WRITE(host->base, ISE, 0);
                OMAP_HSMMC_WRITE(host->base, IE, 0);
                OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
                OMAP_HSMMC_WRITE(host->base, HCTL,
                                OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
        }

        if (host->dbclk)
                clk_disable_unprepare(host->dbclk);

        pm_runtime_put_sync(host->dev);
        return 0;
}

/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
        struct omap_hsmmc_host *host = dev_get_drvdata(dev);

        if (!host)
                return 0;

        pm_runtime_get_sync(host->dev);

        if (host->dbclk)
                clk_prepare_enable(host->dbclk);

        if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
                omap_hsmmc_conf_bus_power(host);

        omap_hsmmc_protect_card(host);
        pm_runtime_mark_last_busy(host->dev);
        pm_runtime_put_autosuspend(host->dev);
        return 0;
}
#endif
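
/*
 * Runtime PM: while SDIO card interrupts are enabled, the runtime suspend
 * path below first masks the controller interrupts and then samples the
 * DAT1 level in PSTATE.  If DAT1 is already low, an SDIO interrupt is
 * pending (and its handler may be running on another CPU), so the suspend
 * is aborted with -EBUSY and the card interrupt is re-armed; otherwise the
 * pins are moved to their idle state.
 */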

static int omap_hsmmc_runtime_suspend(struct device *dev)
{
        struct omap_hsmmc_host *host;
        unsigned long flags;
        int ret = 0;

        host = platform_get_drvdata(to_platform_device(dev));
        omap_hsmmc_context_save(host);
        dev_dbg(dev, "disabled\n");

        spin_lock_irqsave(&host->irq_lock, flags);
        if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
            (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
                /* disable sdio irq handling to prevent race */
                OMAP_HSMMC_WRITE(host->base, ISE, 0);
                OMAP_HSMMC_WRITE(host->base, IE, 0);

                if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
                        /*
                         * DAT1 line is low, so an SDIO interrupt is pending
                         * and its handler may already be running on another
                         * CPU: re-arm the card interrupt and abort the
                         * suspend.
                         */
                        dev_dbg(dev, "pending sdio irq, abort suspend\n");
                        OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
                        OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
                        OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
                        pm_runtime_mark_last_busy(dev);
                        ret = -EBUSY;
                        goto abort;
                }

                pinctrl_pm_select_idle_state(dev);
        } else {
                pinctrl_pm_select_idle_state(dev);
        }

abort:
        spin_unlock_irqrestore(&host->irq_lock, flags);
        return ret;
}

static int omap_hsmmc_runtime_resume(struct device *dev)
{
        struct omap_hsmmc_host *host;
        unsigned long flags;

        host = platform_get_drvdata(to_platform_device(dev));
        omap_hsmmc_context_restore(host);
        dev_dbg(dev, "enabled\n");

        spin_lock_irqsave(&host->irq_lock, flags);
        if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
            (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {

                pinctrl_pm_select_default_state(host->dev);

                /* a card interrupt may have been lost while the pinmux was in the idle state */
                OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
                OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
                OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
        } else {
                pinctrl_pm_select_default_state(host->dev);
        }
        spin_unlock_irqrestore(&host->irq_lock, flags);
        return 0;
}

static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
        .runtime_suspend = omap_hsmmc_runtime_suspend,
        .runtime_resume = omap_hsmmc_runtime_resume,
};

static struct platform_driver omap_hsmmc_driver = {
        .probe = omap_hsmmc_probe,
        .remove = omap_hsmmc_remove,
        .driver = {
                .name = DRIVER_NAME,
                .pm = &omap_hsmmc_dev_pm_ops,
                .of_match_table = of_match_ptr(omap_mmc_of_match),
        },
};

module_platform_driver(omap_hsmmc_driver);
MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
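
/*
 * The MODULE_DEVICE_TABLE(of, ...) entry and MODULE_ALIAS("platform:" DRIVER_NAME)
 * above let userspace auto-load this driver both for DT-described controllers
 * (matched by compatible string) and for legacy platform devices registered
 * under the "omap_hsmmc" name.
 */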