/*
 * drivers/mmc/host/omap_hsmmc.c
 *
 * Driver for OMAP2430/3430 MMC controller.
 *
 * Copyright (C) 2007 Texas Instruments.
 *
 * Authors:
 *	Syed Mohammed Khasim	<x0khasim@ti.com>
 *	Madhusudhan		<madhu.cr@ti.com>
 *	Mohit Jalori		<mjalori@ti.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/omap-dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/mmc-omap.h>

/* OMAP HSMMC Host Controller Registers */
#define OMAP_HSMMC_SYSSTATUS	0x0014
#define OMAP_HSMMC_CON		0x002C
#define OMAP_HSMMC_SDMASA	0x0100
#define OMAP_HSMMC_BLK		0x0104
#define OMAP_HSMMC_ARG		0x0108
#define OMAP_HSMMC_CMD		0x010C
#define OMAP_HSMMC_RSP10	0x0110
#define OMAP_HSMMC_RSP32	0x0114
#define OMAP_HSMMC_RSP54	0x0118
#define OMAP_HSMMC_RSP76	0x011C
#define OMAP_HSMMC_DATA		0x0120
#define OMAP_HSMMC_PSTATE	0x0124
#define OMAP_HSMMC_HCTL		0x0128
#define OMAP_HSMMC_SYSCTL	0x012C
#define OMAP_HSMMC_STAT		0x0130
#define OMAP_HSMMC_IE		0x0134
#define OMAP_HSMMC_ISE		0x0138
#define OMAP_HSMMC_AC12		0x013C
#define OMAP_HSMMC_CAPA		0x0140

#define VS18			(1 << 26)
#define VS30			(1 << 25)
#define HSS			(1 << 21)
#define SDVS18			(0x5 << 9)
#define SDVS30			(0x6 << 9)
#define SDVS33			(0x7 << 9)
#define SDVS_MASK		0x00000E00
#define SDVSCLR			0xFFFFF1FF
#define SDVSDET			0x00000400
#define AUTOIDLE		0x1
#define SDBP			(1 << 8)
#define DTO			0xe
#define ICE			0x1
#define ICS			0x2
#define CEN			(1 << 2)
#define CLKD_MAX		0x3FF		/* max clock divisor: 1023 */
#define CLKD_MASK		0x0000FFC0
#define CLKD_SHIFT		6
#define DTO_MASK		0x000F0000
#define DTO_SHIFT		16
#define INIT_STREAM		(1 << 1)
#define ACEN_ACMD23		(2 << 2)
#define DP_SELECT		(1 << 21)
#define DDIR			(1 << 4)
#define DMAE			0x1
#define MSBS			(1 << 5)
#define BCE			(1 << 1)
#define FOUR_BIT		(1 << 1)
#define HSPE			(1 << 2)
#define IWE			(1 << 24)
#define DDR			(1 << 19)
#define CLKEXTFREE		(1 << 16)
#define CTPL			(1 << 11)
#define DW8			(1 << 5)
#define OD			0x1
#define STAT_CLEAR		0xFFFFFFFF
#define INIT_STREAM_CMD		0x00000000
#define DUAL_VOLT_OCR_BIT	7
#define SRC			(1 << 25)
#define SRD			(1 << 26)
#define SOFTRESET		(1 << 1)

/* PSTATE */
#define DLEV_DAT(x)		(1 << (20 + (x)))

/* Interrupt masks for IE and ISE register */
#define CC_EN			(1 << 0)
#define TC_EN			(1 << 1)
#define BWR_EN			(1 << 4)
#define BRR_EN			(1 << 5)
#define CIRQ_EN			(1 << 8)
#define ERR_EN			(1 << 15)
#define CTO_EN			(1 << 16)
#define CCRC_EN			(1 << 17)
#define CEB_EN			(1 << 18)
#define CIE_EN			(1 << 19)
#define DTO_EN			(1 << 20)
#define DCRC_EN			(1 << 21)
#define DEB_EN			(1 << 22)
#define ACE_EN			(1 << 24)
#define CERR_EN			(1 << 28)
#define BADA_EN			(1 << 29)

#define INT_EN_MASK	(BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\
			DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \
			BRR_EN | BWR_EN | TC_EN | CC_EN)

#define CNI	(1 << 7)
#define ACIE	(1 << 4)
#define ACEB	(1 << 3)
#define ACCE	(1 << 2)
#define ACTO	(1 << 1)
#define ACNE	(1 << 0)

#define MMC_AUTOSUSPEND_DELAY	100
#define MMC_TIMEOUT_MS		20		/* 20 mSec */
#define MMC_TIMEOUT_US		20000		/* 20000 micro Sec */
#define OMAP_MMC_MIN_CLOCK	400000
#define OMAP_MMC_MAX_CLOCK	52000000
#define DRIVER_NAME		"omap_hsmmc"

#define VDD_1V8			1800000		/* 1800000 uV */
#define VDD_3V0			3000000		/* 3000000 uV */
#define VDD_165_195		(ffs(MMC_VDD_165_195) - 1)

/*
 * One controller can have multiple slots, like on some omap boards using
 * omap.c controller driver. Luckily this is not currently done on any known
 * omap_hsmmc.c device.
 */
#define mmc_slot(host)		(host->pdata->slots[host->slot_id])

/*
 * MMC Host controller read/write API's
 */
#define OMAP_HSMMC_READ(base, reg)	\
	__raw_readl((base) + OMAP_HSMMC_##reg)

#define OMAP_HSMMC_WRITE(base, reg, val) \
	__raw_writel((val), (base) + OMAP_HSMMC_##reg)

struct omap_hsmmc_next {
	unsigned int	dma_len;
	s32		cookie;
};

struct omap_hsmmc_host {
	struct device		*dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	struct clk		*fclk;
	struct clk		*dbclk;
	/*
	 * vcc == configured supply
	 * vcc_aux == optional
	 *   -	MMC1, supply for DAT4..DAT7
	 *   -	MMC2/MMC3, external level shifter voltage supply, for
	 *	chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
	 */
	struct regulator	*vcc;
	struct regulator	*vcc_aux;
	struct regulator	*pbias;
	bool			pbias_enabled;
	void __iomem		*base;
	resource_size_t		mapbase;
	spinlock_t		irq_lock; /* Prevent races with irq handler */
	unsigned int		dma_len;
	unsigned int		dma_sg_idx;
	unsigned char		bus_mode;
	unsigned char		power_mode;
	int			suspended;
	u32			con;
	u32			hctl;
	u32			sysctl;
	u32			capa;
	int			irq;
	int			wake_irq;
	int			use_dma, dma_ch;
	struct dma_chan		*tx_chan;
	struct dma_chan		*rx_chan;
	int			slot_id;
	int			response_busy;
	int			context_loss;
	int			protect_card;
	int			reqs_blocked;
	int			use_reg;
	int			req_in_progress;
	unsigned long		clk_rate;
	unsigned int		flags;
#define AUTO_CMD23		(1 << 0)	/* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED	(1 << 1)	/* SDIO irq enabled */
#define HSMMC_WAKE_IRQ_ENABLED	(1 << 2)
	struct omap_hsmmc_next	next_data;
	struct omap_mmc_platform_data	*pdata;
};

struct omap_mmc_of_data {
	u32 reg_offset;
	u8 controller_flags;
};

static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);

static int omap_hsmmc_card_detect(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_mmc_platform_data *mmc = host->pdata;

	/* NOTE: assumes card detect signal is active-low */
	return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
}
static int omap_hsmmc_get_wp(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_mmc_platform_data *mmc = host->pdata;

	/* NOTE: assumes write protect signal is active-high */
	return gpio_get_value_cansleep(mmc->slots[0].gpio_wp);
}

static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_mmc_platform_data *mmc = host->pdata;

	/* NOTE: assumes card detect signal is active-low */
	return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
}

#ifdef CONFIG_PM

static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_mmc_platform_data *mmc = host->pdata;

	disable_irq(mmc->slots[0].card_detect_irq);
	return 0;
}

static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);
	struct omap_mmc_platform_data *mmc = host->pdata;

	enable_irq(mmc->slots[0].card_detect_irq);
	return 0;
}

#else

#define omap_hsmmc_suspend_cdirq	NULL
#define omap_hsmmc_resume_cdirq		NULL

#endif

#ifdef CONFIG_REGULATOR

static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
				int vdd)
{
	struct omap_hsmmc_host *host =
		platform_get_drvdata(to_platform_device(dev));
	int ret = 0;

	/*
	 * If we don't see a Vcc regulator, assume it's a fixed
	 * voltage always-on regulator.
	 */
	if (!host->vcc)
		return 0;

	if (mmc_slot(host).before_set_reg)
		mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);

	if (host->pbias) {
		if (host->pbias_enabled == 1) {
			ret = regulator_disable(host->pbias);
			if (!ret)
				host->pbias_enabled = 0;
		}
		regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0);
	}

	/*
	 * Assume Vcc regulator is used only to power the card ... OMAP
	 * VDDS is used to power the pins, optionally with a transceiver to
	 * support cards using voltages other than VDDS (1.8V nominal). When a
	 * transceiver is used, DAT3..7 are muxed as transceiver control pins.
	 *
	 * In some cases this regulator won't support enable/disable;
	 * e.g. it's a fixed rail for a WLAN chip.
	 *
	 * In other cases vcc_aux switches interface power. Example, for
	 * eMMC cards it represents VccQ. Sometimes transceivers or SDIO
	 * chips/cards need an interface voltage rail too.
	 */
	if (power_on) {
		if (host->vcc)
			ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
		/* Enable interface voltage rail, if needed */
		if (ret == 0 && host->vcc_aux) {
			ret = regulator_enable(host->vcc_aux);
			if (ret < 0 && host->vcc)
				ret = mmc_regulator_set_ocr(host->mmc,
							    host->vcc, 0);
		}
	} else {
		/* Shut down the rail */
		if (host->vcc_aux)
			ret = regulator_disable(host->vcc_aux);
		if (host->vcc) {
			/* Then proceed to shut down the local regulator */
			ret = mmc_regulator_set_ocr(host->mmc,
						    host->vcc, 0);
		}
	}

	if (host->pbias) {
		if (vdd <= VDD_165_195)
			ret = regulator_set_voltage(host->pbias, VDD_1V8,
						    VDD_1V8);
		else
			ret = regulator_set_voltage(host->pbias, VDD_3V0,
						    VDD_3V0);
		if (ret < 0)
			goto error_set_power;

		if (host->pbias_enabled == 0) {
			ret = regulator_enable(host->pbias);
			if (!ret)
				host->pbias_enabled = 1;
		}
	}

	if (mmc_slot(host).after_set_reg)
		mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);

error_set_power:
	return ret;
}
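
/*
 * Editorial note (an assumption drawn from the code above, not from the
 * original comments): the "vdd" argument is the OCR bit index used by the
 * MMC core (ios->vdd), not a voltage in microvolts.  VDD_165_195 expands
 * to ffs(MMC_VDD_165_195) - 1 == 7, so "vdd <= VDD_165_195" above selects
 * the 1.8V pbias setting by comparing bit positions.
 */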
static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	struct regulator *reg;
	int ocr_value = 0;

	reg = devm_regulator_get(host->dev, "vmmc");
	if (IS_ERR(reg)) {
		dev_err(host->dev, "unable to get vmmc regulator %ld\n",
			PTR_ERR(reg));
		return PTR_ERR(reg);
	} else {
		host->vcc = reg;
		ocr_value = mmc_regulator_get_ocrmask(reg);
		if (!mmc_slot(host).ocr_mask) {
			mmc_slot(host).ocr_mask = ocr_value;
		} else {
			if (!(mmc_slot(host).ocr_mask & ocr_value)) {
				dev_err(host->dev, "ocrmask %x is not supported\n",
					mmc_slot(host).ocr_mask);
				mmc_slot(host).ocr_mask = 0;
				return -EINVAL;
			}
		}
	}
	mmc_slot(host).set_power = omap_hsmmc_set_power;

	/* Allow an aux regulator */
	reg = devm_regulator_get_optional(host->dev, "vmmc_aux");
	host->vcc_aux = IS_ERR(reg) ? NULL : reg;

	reg = devm_regulator_get_optional(host->dev, "pbias");
	host->pbias = IS_ERR(reg) ? NULL : reg;

	/* For eMMC do not power off when not in sleep state */
	if (mmc_slot(host).no_regulator_off_init)
		return 0;
	/*
	 * To disable boot_on regulator, enable regulator
	 * to increase usecount and then disable it.
	 */
	if ((host->vcc && regulator_is_enabled(host->vcc) > 0) ||
	    (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) {
		int vdd = ffs(mmc_slot(host).ocr_mask) - 1;

		mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd);
		mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
	}

	return 0;
}

static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
{
	mmc_slot(host).set_power = NULL;
}

static inline int omap_hsmmc_have_reg(void)
{
	return 1;
}

#else

static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
{
	return -EINVAL;
}

static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
{
}

static inline int omap_hsmmc_have_reg(void)
{
	return 0;
}

#endif

static int omap_hsmmc_gpio_init(struct omap_mmc_platform_data *pdata)
{
	int ret;

	if (gpio_is_valid(pdata->slots[0].switch_pin)) {
		if (pdata->slots[0].cover)
			pdata->slots[0].get_cover_state =
					omap_hsmmc_get_cover_state;
		else
			pdata->slots[0].card_detect = omap_hsmmc_card_detect;
		pdata->slots[0].card_detect_irq =
				gpio_to_irq(pdata->slots[0].switch_pin);
		ret = gpio_request(pdata->slots[0].switch_pin, "mmc_cd");
		if (ret)
			return ret;
		ret = gpio_direction_input(pdata->slots[0].switch_pin);
		if (ret)
			goto err_free_sp;
	} else
		pdata->slots[0].switch_pin = -EINVAL;

	if (gpio_is_valid(pdata->slots[0].gpio_wp)) {
		pdata->slots[0].get_ro = omap_hsmmc_get_wp;
		ret = gpio_request(pdata->slots[0].gpio_wp, "mmc_wp");
		if (ret)
			goto err_free_cd;
		ret = gpio_direction_input(pdata->slots[0].gpio_wp);
		if (ret)
			goto err_free_wp;
	} else
		pdata->slots[0].gpio_wp = -EINVAL;

	return 0;

err_free_wp:
	gpio_free(pdata->slots[0].gpio_wp);
err_free_cd:
	if (gpio_is_valid(pdata->slots[0].switch_pin))
err_free_sp:
		gpio_free(pdata->slots[0].switch_pin);
	return ret;
}

static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata)
{
	if (gpio_is_valid(pdata->slots[0].gpio_wp))
		gpio_free(pdata->slots[0].gpio_wp);
	if (gpio_is_valid(pdata->slots[0].switch_pin))
		gpio_free(pdata->slots[0].switch_pin);
}

/*
 * Start clock to the card
 */
static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
}

/*
 * Stop clock to the card
 */
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
{
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
	if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0)
		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
}

static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,
				  struct mmc_command *cmd)
{
	u32 irq_mask = INT_EN_MASK;
	unsigned long flags;

	if (host->use_dma)
		irq_mask &= ~(BRR_EN | BWR_EN);

	/* Disable timeout for erases */
	if (cmd->opcode == MMC_ERASE)
		irq_mask &= ~DTO_EN;

	spin_lock_irqsave(&host->irq_lock, flags);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* latch pending CIRQ, but don't signal MMC core */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}
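
/*
 * Editorial note (hedged interpretation): IE controls which events are
 * latched into STAT, while ISE controls which latched events actually
 * raise the interrupt line.  That is why omap_hsmmc_enable_irq() above
 * adds CIRQ_EN only to IE ("latch pending CIRQ, but don't signal MMC
 * core") and omap_hsmmc_disable_irq() below keeps CIRQ_EN in both
 * registers so SDIO card interrupts still fire between requests.
 */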
static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
{
	u32 irq_mask = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	/* no transfer running but need to keep cirq if enabled */
	if (host->flags & HSMMC_SDIO_IRQ_ENABLED)
		irq_mask |= CIRQ_EN;
	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	spin_unlock_irqrestore(&host->irq_lock, flags);
}

/* Calculate divisor for the given clock frequency */
static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
{
	u16 dsor = 0;

	if (ios->clock) {
		dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
		if (dsor > CLKD_MAX)
			dsor = CLKD_MAX;
	}

	return dsor;
}

static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	unsigned long regval;
	unsigned long timeout;
	unsigned long clkdiv;

	dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);

	omap_hsmmc_stop_clock(host);

	regval = OMAP_HSMMC_READ(host->base, SYSCTL);
	regval = regval & ~(CLKD_MASK | DTO_MASK);
	clkdiv = calc_divisor(host, ios);
	regval = regval | (clkdiv << 6) | (DTO << 16);
	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
	OMAP_HSMMC_WRITE(host->base, SYSCTL,
		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);

	/* Wait till the ICS bit is set */
	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
		&& time_before(jiffies, timeout))
		cpu_relax();

	/*
	 * Enable High-Speed Support
	 * Pre-Requisites
	 *	- Controller should support High-Speed-Enable Bit
	 *	- Controller should not be using DDR Mode
	 *	- Controller should advertise that it supports High Speed
	 *	  in capabilities register
	 *	- MMC/SD clock coming out of controller > 25MHz
	 */
	if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) &&
	    (ios->timing != MMC_TIMING_MMC_DDR52) &&
	    ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) {
		regval = OMAP_HSMMC_READ(host->base, HCTL);
		if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000)
			regval |= HSPE;
		else
			regval &= ~HSPE;

		OMAP_HSMMC_WRITE(host->base, HCTL, regval);
	}

	omap_hsmmc_start_clock(host);
}

static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		con |= DDR;	/* configure in DDR mode */
	else
		con &= ~DDR;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
		break;
	case MMC_BUS_WIDTH_4:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
		break;
	case MMC_BUS_WIDTH_1:
		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
		OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
		break;
	}
}
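
/*
 * Editorial note, a worked example of calc_divisor()/omap_hsmmc_set_clock()
 * above, assuming a 96 MHz functional clock (typical for OMAP3 MMC): a
 * 400 kHz request gives DIV_ROUND_UP(96000000, 400000) = 240, i.e. exactly
 * 400 kHz at the card; a 52 MHz request gives a divisor of 2, i.e. 48 MHz,
 * and since 96000000 / 2 > 25000000 the HSPE bit is set, provided the slot
 * has HSMMC_HAS_HSPE_SUPPORT, HSS is advertised in CAPA and DDR timing is
 * not in use.
 */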
static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 con;

	con = OMAP_HSMMC_READ(host->base, CON);
	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		OMAP_HSMMC_WRITE(host->base, CON, con | OD);
	else
		OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
}

#ifdef CONFIG_PM

/*
 * Restore the MMC host context, if it was lost as result of a
 * power state change.
 */
static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;
	u32 hctl, capa;
	unsigned long timeout;

	if (host->con == OMAP_HSMMC_READ(host->base, CON) &&
	    host->hctl == OMAP_HSMMC_READ(host->base, HCTL) &&
	    host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) &&
	    host->capa == OMAP_HSMMC_READ(host->base, CAPA))
		return 0;

	host->context_loss++;

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		if (host->power_mode != MMC_POWER_OFF &&
		    (1 << ios->vdd) <= MMC_VDD_23_24)
			hctl = SDVS18;
		else
			hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		hctl |= IWE;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | hctl);

	OMAP_HSMMC_WRITE(host->base, CAPA,
			OMAP_HSMMC_READ(host->base, CAPA) | capa);

	OMAP_HSMMC_WRITE(host->base, HCTL,
			OMAP_HSMMC_READ(host->base, HCTL) | SDBP);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP
		&& time_before(jiffies, timeout))
		;

	OMAP_HSMMC_WRITE(host->base, ISE, 0);
	OMAP_HSMMC_WRITE(host->base, IE, 0);
	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);

	/* Do not initialize card-specific things if the power is off */
	if (host->power_mode == MMC_POWER_OFF)
		goto out;

	omap_hsmmc_set_bus_width(host);

	omap_hsmmc_set_clock(host);

	omap_hsmmc_set_bus_mode(host);

out:
	dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n",
		host->context_loss);
	return 0;
}
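
/*
 * Editorial note: context_loss above is purely a diagnostic counter; it is
 * what the debugfs "regs" file further down reports as "ctx_loss".
 */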
/*
 * Save the MMC host context (store the number of power state changes so far).
 */
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
	host->con = OMAP_HSMMC_READ(host->base, CON);
	host->hctl = OMAP_HSMMC_READ(host->base, HCTL);
	host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL);
	host->capa = OMAP_HSMMC_READ(host->base, CAPA);
}

#else

static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
	return 0;
}

static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

#endif

/*
 * Send init stream sequence to card
 * before sending IDLE command
 */
static void send_init_stream(struct omap_hsmmc_host *host)
{
	int reg = 0;
	unsigned long timeout;

	if (host->protect_card)
		return;

	disable_irq(host->irq);

	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);

	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
	while ((reg != CC_EN) && time_before(jiffies, timeout))
		reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;

	OMAP_HSMMC_WRITE(host->base, CON,
		OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM);

	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
	OMAP_HSMMC_READ(host->base, STAT);

	enable_irq(host->irq);
}

static inline
int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
{
	int r = 1;

	if (mmc_slot(host).get_cover_state)
		r = mmc_slot(host).get_cover_state(host->dev, host->slot_id);
	return r;
}

static ssize_t
omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	return sprintf(buf, "%s\n",
			omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);

static ssize_t
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	return sprintf(buf, "%s\n", mmc_slot(host).name);
}

static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);

/*
 * Configure the response type and send the cmd.
 */
static void
omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
	struct mmc_data *data)
{
	int cmdreg = 0, resptype = 0, cmdtype = 0;

	dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",
		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
	host->cmd = cmd;

	omap_hsmmc_enable_irq(host, cmd);

	host->response_busy = 0;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			resptype = 1;
		else if (cmd->flags & MMC_RSP_BUSY) {
			resptype = 3;
			host->response_busy = 1;
		} else
			resptype = 2;
	}

	/*
	 * Unlike OMAP1 controller, the cmdtype does not seem to be based on
	 * ac, bc, adtc, bcr. Only commands ending an open ended transfer need
	 * a val of 0x3, rest 0x0.
	 */
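	/*
	 * Editorial note, a worked example of the CMD register value
	 * assembled below (hypothetical CMD18 multi-block read, R1
	 * response, DMA in use): cmdreg = (18 << 24) | (2 << 16) |
	 * DP_SELECT | DDIR | MSBS | BCE | DMAE, plus ACEN_ACMD23 when the
	 * request carries an sbc and the host advertises AUTO_CMD23.
	 */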
	if (cmd == host->mrq->stop)
		cmdtype = 0x3;

	cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);

	if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) &&
	    host->mrq->sbc) {
		cmdreg |= ACEN_ACMD23;
		OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg);
	}
	if (data) {
		cmdreg |= DP_SELECT | MSBS | BCE;
		if (data->flags & MMC_DATA_READ)
			cmdreg |= DDIR;
		else
			cmdreg &= ~(DDIR);
	}

	if (host->use_dma)
		cmdreg |= DMAE;

	host->req_in_progress = 1;

	OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
	OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}

static int
omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
	struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
	int dma_ch;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);
	host->req_in_progress = 0;
	dma_ch = host->dma_ch;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	omap_hsmmc_disable_irq(host);
	/* Do not complete the request if DMA is still in progress */
	if (mrq->data && host->use_dma && dma_ch != -1)
		return;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

/*
 * Notify the transfer complete to MMC core
 */
static void
omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
	if (!data) {
		struct mmc_request *mrq = host->mrq;

		/* TC before CC from CMD6 - don't know why, but it happens */
		if (host->cmd && host->cmd->opcode == 6 &&
		    host->response_busy) {
			host->response_busy = 0;
			return;
		}

		omap_hsmmc_request_done(host, mrq);
		return;
	}

	host->data = NULL;

	if (!data->error)
		data->bytes_xfered += data->blocks * (data->blksz);
	else
		data->bytes_xfered = 0;

	if (data->stop && (data->error || !host->mrq->sbc))
		omap_hsmmc_start_command(host, data->stop, NULL);
	else
		omap_hsmmc_request_done(host, data->mrq);
}

/*
 * Notify the core about command completion
 */
static void
omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
	if (host->mrq->sbc && (host->cmd == host->mrq->sbc) &&
	    !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) {
		host->cmd = NULL;
		omap_hsmmc_start_dma_transfer(host);
		omap_hsmmc_start_command(host, host->mrq->cmd,
						host->mrq->data);
		return;
	}

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = OMAP_HSMMC_READ(host->base, RSP10);
			cmd->resp[2] = OMAP_HSMMC_READ(host->base, RSP32);
			cmd->resp[1] = OMAP_HSMMC_READ(host->base, RSP54);
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP76);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
		}
	}
	if ((host->data == NULL && !host->response_busy) || cmd->error)
		omap_hsmmc_request_done(host, host->mrq);
}
/*
 * DMA clean up for command errors
 */
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
	int dma_ch;
	unsigned long flags;

	host->data->error = errno;

	spin_lock_irqsave(&host->irq_lock, flags);
	dma_ch = host->dma_ch;
	host->dma_ch = -1;
	spin_unlock_irqrestore(&host->irq_lock, flags);

	if (host->use_dma && dma_ch != -1) {
		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);

		dmaengine_terminate_all(chan);
		dma_unmap_sg(chan->device->dev,
			host->data->sg, host->data->sg_len,
			omap_hsmmc_get_dma_dir(host, host->data));

		host->data->host_cookie = 0;
	}
	host->data = NULL;
}

/*
 * Readable error output
 */
#ifdef CONFIG_MMC_DEBUG
static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
	/* --- means reserved bit without definition at documentation */
	static const char *omap_hsmmc_status_bits[] = {
		"CC"  , "TC"  , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
		"CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
		"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
		"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
	};
	char res[256];
	char *buf = res;
	int len, i;

	len = sprintf(buf, "MMC IRQ 0x%x :", status);
	buf += len;

	for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
		if (status & (1 << i)) {
			len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
			buf += len;
		}

	dev_vdbg(mmc_dev(host->mmc), "%s\n", res);
}
#else
static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
					     u32 status)
{
}
#endif  /* CONFIG_MMC_DEBUG */
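
/*
 * Editorial note: with CONFIG_MMC_DEBUG, a command timeout would be traced
 * by the helper above roughly as "MMC IRQ 0x18000 : ERRI CTO" (ERRI is
 * bit 15, CTO bit 16; lower bits are printed first).
 */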
/*
 * MMC controller internal state machines reset
 *
 * Used to reset command or data internal state machines, using respectively
 * SRC or SRD bit of SYSCTL register
 * Can be called from interrupt context
 */
static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
						   unsigned long bit)
{
	unsigned long i = 0;
	unsigned long limit = MMC_TIMEOUT_US;

	OMAP_HSMMC_WRITE(host->base, SYSCTL,
			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit);

	/*
	 * OMAP4 ES2 and greater has an updated reset logic.
	 * Monitor a 0->1 transition first
	 */
	if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
		while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
					&& (i++ < limit))
			udelay(1);
	}
	i = 0;

	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&
		(i++ < limit))
		udelay(1);

	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)
		dev_err(mmc_dev(host->mmc),
			"Timeout waiting on controller reset in %s\n",
			__func__);
}

static void hsmmc_command_incomplete(struct omap_hsmmc_host *host,
				     int err, int end_cmd)
{
	if (end_cmd) {
		omap_hsmmc_reset_controller_fsm(host, SRC);
		if (host->cmd)
			host->cmd->error = err;
	}

	if (host->data) {
		omap_hsmmc_reset_controller_fsm(host, SRD);
		omap_hsmmc_dma_cleanup(host, err);
	} else if (host->mrq && host->mrq->cmd)
		host->mrq->cmd->error = err;
}

static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
{
	struct mmc_data *data;
	int end_cmd = 0, end_trans = 0;
	int error = 0;

	data = host->data;
	dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);

	if (status & ERR_EN) {
		omap_hsmmc_dbg_report_irq(host, status);

		if (status & (CTO_EN | CCRC_EN))
			end_cmd = 1;
		if (status & (CTO_EN | DTO_EN))
			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
		else if (status & (CCRC_EN | DCRC_EN))
			hsmmc_command_incomplete(host, -EILSEQ, end_cmd);

		if (status & ACE_EN) {
			u32 ac12;
			ac12 = OMAP_HSMMC_READ(host->base, AC12);
			if (!(ac12 & ACNE) && host->mrq->sbc) {
				end_cmd = 1;
				if (ac12 & ACTO)
					error = -ETIMEDOUT;
				else if (ac12 & (ACCE | ACEB | ACIE))
					error = -EILSEQ;
				host->mrq->sbc->error = error;
				hsmmc_command_incomplete(host, error, end_cmd);
			}
			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
		}
		if (host->data || host->response_busy) {
			end_trans = !end_cmd;
			host->response_busy = 0;
		}
	}

	OMAP_HSMMC_WRITE(host->base, STAT, status);
	if (end_cmd || ((status & CC_EN) && host->cmd))
		omap_hsmmc_cmd_done(host, host->cmd);
	if ((end_trans || (status & TC_EN)) && host->mrq)
		omap_hsmmc_xfer_done(host, data);
}

/*
 * MMC controller IRQ handler
 */
static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	int status;

	status = OMAP_HSMMC_READ(host->base, STAT);
	while (status & (INT_EN_MASK | CIRQ_EN)) {
		if (host->req_in_progress)
			omap_hsmmc_do_irq(host, status);

		if (status & CIRQ_EN)
			mmc_signal_sdio_irq(host->mmc);

		/* Flush posted write */
		status = OMAP_HSMMC_READ(host->base, STAT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;

	/* cirq is level triggered, disable to avoid infinite loop */
	spin_lock(&host->irq_lock);
	if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
		disable_irq_nosync(host->wake_irq);
		host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
	}
	spin_unlock(&host->irq_lock);
	pm_request_resume(host->dev); /* no use counter */

	return IRQ_HANDLED;
}
static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
	unsigned long i;

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | SDBP);
	for (i = 0; i < loops_per_jiffy; i++) {
		if (OMAP_HSMMC_READ(host->base, HCTL) & SDBP)
			break;
		cpu_relax();
	}
}

/*
 * Switch MMC interface voltage ... only relevant for MMC1.
 *
 * MMC2 and MMC3 use fixed 1.8V levels, and maybe a transceiver.
 * The MMC2 transceiver controls are used instead of DAT4..DAT7.
 * Some chips, like eMMC ones, use internal transceivers.
 */
static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
{
	u32 reg_val = 0;
	int ret;

	/* Disable the clocks */
	pm_runtime_put_sync(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	/* Turn the power off */
	ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);

	/* Turn the power ON with given VDD 1.8 or 3.0v */
	if (!ret)
		ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
					       vdd);
	pm_runtime_get_sync(host->dev);
	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	if (ret != 0)
		goto err;

	OMAP_HSMMC_WRITE(host->base, HCTL,
		OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR);
	reg_val = OMAP_HSMMC_READ(host->base, HCTL);

	/*
	 * If a MMC dual voltage card is detected, the set_ios fn calls
	 * this fn with VDD bit set for 1.8V. Upon card removal from the
	 * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
	 *
	 * Cope with a bit of slop in the range ... per data sheets:
	 *  - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
	 *    but recommended values are 1.71V to 1.89V
	 *  - "3.0V" for vdds_mmc1/vdds_mmc1a can be up to 3.5V max,
	 *    but recommended values are 2.7V to 3.3V
	 *
	 * Board setup code shouldn't permit anything very out-of-range.
	 * TWL4030-family VMMC1 and VSIM regulators are fine (avoiding the
	 * middle range) but VSIM can't power DAT4..DAT7 at more than 3V.
	 */
	if ((1 << vdd) <= MMC_VDD_23_24)
		reg_val |= SDVS18;
	else
		reg_val |= SDVS30;

	OMAP_HSMMC_WRITE(host->base, HCTL, reg_val);
	set_sd_bus_power(host);

	return 0;
err:
	dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");
	return ret;
}

/* Protect the card while the cover is open */
static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
{
	if (!mmc_slot(host).get_cover_state)
		return;

	host->reqs_blocked = 0;
	if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) {
		if (host->protect_card) {
			dev_info(host->dev, "%s: cover is closed, "
					 "card is now accessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 0;
		}
	} else {
		if (!host->protect_card) {
			dev_info(host->dev, "%s: cover is open, "
					 "card is now inaccessible\n",
					 mmc_hostname(host->mmc));
			host->protect_card = 1;
		}
	}
}

/*
 * irq handler to notify the core about card insertion/removal
 */
static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
{
	struct omap_hsmmc_host *host = dev_id;
	struct omap_mmc_slot_data *slot = &mmc_slot(host);
	int carddetect;

	sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");

	if (slot->card_detect)
		carddetect = slot->card_detect(host->dev, host->slot_id);
	else {
		omap_hsmmc_protect_card(host);
		carddetect = -ENOSYS;
	}

	if (carddetect)
		mmc_detect_change(host->mmc, (HZ * 200) / 1000);
	else
		mmc_detect_change(host->mmc, (HZ * 50) / 1000);
	return IRQ_HANDLED;
}

static void omap_hsmmc_dma_callback(void *param)
{
	struct omap_hsmmc_host *host = param;
	struct dma_chan *chan;
	struct mmc_data *data;
	int req_in_progress;

	spin_lock_irq(&host->irq_lock);
	if (host->dma_ch < 0) {
		spin_unlock_irq(&host->irq_lock);
		return;
	}

	data = host->mrq->data;
	chan = omap_hsmmc_get_dma_chan(host, data);
	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev,
			     data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));

	req_in_progress = host->req_in_progress;
	host->dma_ch = -1;
	spin_unlock_irq(&host->irq_lock);

	/* If DMA has finished after TC, complete the request */
	if (!req_in_progress) {
		struct mmc_request *mrq = host->mrq;

		host->mrq = NULL;
		mmc_request_done(host->mmc, mrq);
	}
}
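
/*
 * Editorial note on the host_cookie scheme used below: omap_hsmmc_pre_req()
 * maps the scatterlist ahead of time and tags data->host_cookie with
 * next_data.cookie; when the request is actually issued,
 * omap_hsmmc_pre_dma_transfer() reuses that mapping instead of calling
 * dma_map_sg() again, and omap_hsmmc_post_req() (or the DMA callback, for
 * requests that were never prepared) performs the unmap.
 */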
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
				       struct mmc_data *data,
				       struct omap_hsmmc_next *next,
				       struct dma_chan *chan)
{
	int dma_len;

	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d"
		       " host->next_data.cookie %d\n",
		       __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
	if (next || data->host_cookie != host->next_data.cookie) {
		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
				     omap_hsmmc_get_dma_dir(host, data));

	} else {
		dma_len = host->next_data.dma_len;
		host->next_data.dma_len = 0;
	}


	if (dma_len == 0)
		return -EINVAL;

	if (next) {
		next->dma_len = dma_len;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->dma_len = dma_len;

	return 0;
}

/*
 * Routine to configure and start DMA for the MMC card
 */
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
					 struct mmc_request *req)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *tx;
	int ret = 0, i;
	struct mmc_data *data = req->data;
	struct dma_chan *chan;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		struct scatterlist *sgl;

		sgl = data->sg + i;
		if (sgl->length % data->blksz)
			return -EINVAL;
	}
	if ((data->blksz % 4) != 0)
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		return -EINVAL;

	BUG_ON(host->dma_ch != -1);

	chan = omap_hsmmc_get_dma_chan(host, data);

	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
	if (ret)
		return ret;

	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
		data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		/* FIXME: cleanup */
		return -1;
	}

	tx->callback = omap_hsmmc_dma_callback;
	tx->callback_param = host;

	/* Does not fail */
	dmaengine_submit(tx);

	host->dma_ch = 1;

	return 0;
}

static void set_data_timeout(struct omap_hsmmc_host *host,
			     unsigned int timeout_ns,
			     unsigned int timeout_clks)
{
	unsigned int timeout, cycle_ns;
	uint32_t reg, clkd, dto = 0;

	reg = OMAP_HSMMC_READ(host->base, SYSCTL);
	clkd = (reg & CLKD_MASK) >> CLKD_SHIFT;
	if (clkd == 0)
		clkd = 1;

	cycle_ns = 1000000000 / (host->clk_rate / clkd);
	timeout = timeout_ns / cycle_ns;
	timeout += timeout_clks;
	if (timeout) {
		while ((timeout & 0x80000000) == 0) {
			dto += 1;
			timeout <<= 1;
		}
		dto = 31 - dto;
		timeout <<= 1;
		if (timeout && dto)
			dto += 1;
		if (dto >= 13)
			dto -= 13;
		else
			dto = 0;
		if (dto > 14)
			dto = 14;
	}

	reg &= ~DTO_MASK;
	reg |= dto << DTO_SHIFT;
	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);
}

static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host)
{
	struct mmc_request *req = host->mrq;
	struct dma_chan *chan;

	if (!req->data)
		return;
	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz)
				| (req->data->blocks << 16));
	set_data_timeout(host, req->data->timeout_ns,
				req->data->timeout_clks);
	chan = omap_hsmmc_get_dma_chan(host, req->data);
	dma_async_issue_pending(chan);
}
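
/*
 * Editorial note, a worked example of set_data_timeout() above (assuming a
 * 96 MHz functional clock and a divisor of 2, so cycle_ns == 20): the
 * 100 ms busy timeout used by omap_hsmmc_prepare_data() gives about
 * 5,000,000 cycles, which the loop rounds up to 2^23; subtracting the
 * controller's implicit 2^13 minimum yields a DTO field value of 10.
 */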
/*
 * Configure block length for MMC/SD cards and initiate the transfer.
 */
static int
omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
	int ret;
	host->data = req->data;

	if (req->data == NULL) {
		OMAP_HSMMC_WRITE(host->base, BLK, 0);
		/*
		 * Set an arbitrary 100ms data timeout for commands with
		 * busy signal.
		 */
		if (req->cmd->flags & MMC_RSP_BUSY)
			set_data_timeout(host, 100000000U, 0);
		return 0;
	}

	if (host->use_dma) {
		ret = omap_hsmmc_setup_dma_transfer(host, req);
		if (ret != 0) {
			dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");
			return ret;
		}
	}
	return 0;
}

static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
				int err)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->use_dma && data->host_cookie) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);

		dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
			     omap_hsmmc_get_dma_dir(host, data));
		data->host_cookie = 0;
	}
}

static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       bool is_first_req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mrq->data->host_cookie) {
		mrq->data->host_cookie = 0;
		return ;
	}

	if (host->use_dma) {
		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);

		if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
						&host->next_data, c))
			mrq->data->host_cookie = 0;
	}
}

/*
 * Request function for read/write operation
 */
static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	int err;

	BUG_ON(host->req_in_progress);
	BUG_ON(host->dma_ch != -1);
	if (host->protect_card) {
		if (host->reqs_blocked < 3) {
			/*
			 * Ensure the controller is left in a consistent
			 * state by resetting the command and data state
			 * machines.
			 */
			omap_hsmmc_reset_controller_fsm(host, SRD);
			omap_hsmmc_reset_controller_fsm(host, SRC);
			host->reqs_blocked += 1;
		}
		req->cmd->error = -EBADF;
		if (req->data)
			req->data->error = -EBADF;
		req->cmd->retries = 0;
		mmc_request_done(mmc, req);
		return;
	} else if (host->reqs_blocked)
		host->reqs_blocked = 0;
	WARN_ON(host->mrq != NULL);
	host->mrq = req;
	host->clk_rate = clk_get_rate(host->fclk);
	err = omap_hsmmc_prepare_data(host, req);
	if (err) {
		req->cmd->error = err;
		if (req->data)
			req->data->error = err;
		host->mrq = NULL;
		mmc_request_done(mmc, req);
		return;
	}
	if (req->sbc && !(host->flags & AUTO_CMD23)) {
		omap_hsmmc_start_command(host, req->sbc, NULL);
		return;
	}

	omap_hsmmc_start_dma_transfer(host);
	omap_hsmmc_start_command(host, req->cmd, req->data);
}
/* Routine to configure clock values. Exposed API to core */
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	int do_send_init_stream = 0;

	pm_runtime_get_sync(host->dev);

	if (ios->power_mode != host->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			mmc_slot(host).set_power(host->dev, host->slot_id,
						 0, 0);
			break;
		case MMC_POWER_UP:
			mmc_slot(host).set_power(host->dev, host->slot_id,
						 1, ios->vdd);
			break;
		case MMC_POWER_ON:
			do_send_init_stream = 1;
			break;
		}
		host->power_mode = ios->power_mode;
	}

	/* FIXME: set registers based only on changes to ios */

	omap_hsmmc_set_bus_width(host);

	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		/* Only MMC1 can interface at 3V without some flavor
		 * of external transceiver; but they all handle 1.8V.
		 */
		if ((OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET) &&
			(ios->vdd == DUAL_VOLT_OCR_BIT)) {
			/*
			 * The mmc_select_voltage fn of the core does
			 * not seem to set the power_mode to
			 * MMC_POWER_UP upon recalculating the voltage.
			 * vdd 1.8v.
			 */
			if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
				dev_dbg(mmc_dev(host->mmc),
						"Switch operation failed\n");
		}
	}

	omap_hsmmc_set_clock(host);

	if (do_send_init_stream)
		send_init_stream(host);

	omap_hsmmc_set_bus_mode(host);

	pm_runtime_put_autosuspend(host->dev);
}

static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (!mmc_slot(host).card_detect)
		return -ENOSYS;
	return mmc_slot(host).card_detect(host->dev, host->slot_id);
}

static int omap_hsmmc_get_ro(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (!mmc_slot(host).get_ro)
		return -ENOSYS;
	return mmc_slot(host).get_ro(host->dev, 0);
}

static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	if (mmc_slot(host).init_card)
		mmc_slot(host).init_card(card);
}

static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);
	u32 irq_mask, con;
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	con = OMAP_HSMMC_READ(host->base, CON);
	irq_mask = OMAP_HSMMC_READ(host->base, ISE);
	if (enable) {
		host->flags |= HSMMC_SDIO_IRQ_ENABLED;
		irq_mask |= CIRQ_EN;
		con |= CTPL | CLKEXTFREE;
	} else {
		host->flags &= ~HSMMC_SDIO_IRQ_ENABLED;
		irq_mask &= ~CIRQ_EN;
		con &= ~(CTPL | CLKEXTFREE);
	}
	OMAP_HSMMC_WRITE(host->base, CON, con);
	OMAP_HSMMC_WRITE(host->base, IE, irq_mask);

	/*
	 * if enable, piggy back detection on current request
	 * but always disable immediately
	 */
	if (!host->req_in_progress || !enable)
		OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);

	/* flush posted write */
	OMAP_HSMMC_READ(host->base, IE);

	spin_unlock_irqrestore(&host->irq_lock, flags);
}
static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * For omaps with wake-up path, wakeirq will be irq from pinctrl and
	 * for other omaps, wakeirq will be from GPIO (dat line remuxed to
	 * gpio). wakeirq is needed to detect sdio irq in runtime suspend state
	 * with functional clock disabled.
	 */
	if (!host->dev->of_node || !host->wake_irq)
		return -ENODEV;

	/* Prevent auto-enabling of IRQ */
	irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
	ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
			       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			       mmc_hostname(mmc), host);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
		goto err;
	}

	/*
	 * Some omaps don't have wake-up path from deeper idle states
	 * and need to remux SDIO DAT1 to GPIO for wake-up from idle.
	 */
	if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
		struct pinctrl *p = devm_pinctrl_get(host->dev);
		if (!p) {
			ret = -ENODEV;
			goto err_free_irq;
		}
		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
			dev_info(host->dev, "missing default pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}

		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) {
			dev_info(host->dev, "missing idle pinctrl state\n");
			devm_pinctrl_put(p);
			ret = -EINVAL;
			goto err_free_irq;
		}
		devm_pinctrl_put(p);
	}

	OMAP_HSMMC_WRITE(host->base, HCTL,
			 OMAP_HSMMC_READ(host->base, HCTL) | IWE);
	return 0;

err_free_irq:
	devm_free_irq(host->dev, host->wake_irq, host);
err:
	dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
	host->wake_irq = 0;
	return ret;
}

static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
{
	u32 hctl, capa, value;

	/* Only MMC1 supports 3.0V */
	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
		hctl = SDVS30;
		capa = VS30 | VS18;
	} else {
		hctl = SDVS18;
		capa = VS18;
	}

	value = OMAP_HSMMC_READ(host->base, HCTL) & ~SDVS_MASK;
	OMAP_HSMMC_WRITE(host->base, HCTL, value | hctl);

	value = OMAP_HSMMC_READ(host->base, CAPA);
	OMAP_HSMMC_WRITE(host->base, CAPA, value | capa);

	/* Set SD bus power bit */
	set_sd_bus_power(host);
}

static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	pm_runtime_get_sync(host->dev);

	return 0;
}

static int omap_hsmmc_disable_fclk(struct mmc_host *mmc)
{
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;
}

static const struct mmc_host_ops omap_hsmmc_ops = {
	.enable = omap_hsmmc_enable_fclk,
	.disable = omap_hsmmc_disable_fclk,
	.post_req = omap_hsmmc_post_req,
	.pre_req = omap_hsmmc_pre_req,
	.request = omap_hsmmc_request,
	.set_ios = omap_hsmmc_set_ios,
	.get_cd = omap_hsmmc_get_cd,
	.get_ro = omap_hsmmc_get_ro,
	.init_card = omap_hsmmc_init_card,
	.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
};
#ifdef CONFIG_DEBUG_FS

static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
	struct mmc_host *mmc = s->private;
	struct omap_hsmmc_host *host = mmc_priv(mmc);

	seq_printf(s, "mmc%d:\n", mmc->index);
	seq_printf(s, "sdio irq mode\t%s\n",
		   (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling");

	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		seq_printf(s, "sdio irq \t%s\n",
			   (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled"
			   : "disabled");
	}
	seq_printf(s, "ctx_loss:\t%d\n", host->context_loss);

	pm_runtime_get_sync(host->dev);
	seq_puts(s, "\nregs:\n");
	seq_printf(s, "CON:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CON));
	seq_printf(s, "PSTATE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, PSTATE));
	seq_printf(s, "HCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, HCTL));
	seq_printf(s, "SYSCTL:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, SYSCTL));
	seq_printf(s, "IE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, IE));
	seq_printf(s, "ISE:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, ISE));
	seq_printf(s, "CAPA:\t\t0x%08x\n",
			OMAP_HSMMC_READ(host->base, CAPA));

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;
}

static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, omap_hsmmc_regs_show, inode->i_private);
}

static const struct file_operations mmc_regs_fops = {
	.open		= omap_hsmmc_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
	if (mmc->debugfs_root)
		debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
			mmc, &mmc_regs_fops);
}

#else

static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}

#endif

#ifdef CONFIG_OF
static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = {
	/* See 35xx errata 2.1.1.128 in SPRZ278F */
	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ,
};

static const struct omap_mmc_of_data omap4_mmc_of_data = {
	.reg_offset = 0x100,
};
static const struct omap_mmc_of_data am33xx_mmc_of_data = {
	.reg_offset = 0x100,
	.controller_flags = OMAP_HSMMC_SWAKEUP_MISSING,
};

static const struct of_device_id omap_mmc_of_match[] = {
	{
		.compatible = "ti,omap2-hsmmc",
	},
	{
		.compatible = "ti,omap3-pre-es3-hsmmc",
		.data = &omap3_pre_es3_mmc_of_data,
	},
	{
		.compatible = "ti,omap3-hsmmc",
	},
	{
		.compatible = "ti,omap4-hsmmc",
		.data = &omap4_mmc_of_data,
	},
	{
		.compatible = "ti,am33xx-hsmmc",
		.data = &am33xx_mmc_of_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
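
/*
 * Editorial note: an illustrative (hypothetical) device tree node covering
 * the properties parsed below; the GPIO phandles, the "vmmc1" regulator and
 * its "vmmc-supply" property name are assumptions here, and "reg"/
 * "interrupts" are omitted:
 *
 *	mmc1: mmc@4809c000 {
 *		compatible = "ti,omap4-hsmmc";
 *		ti,dual-volt;
 *		bus-width = <4>;
 *		max-frequency = <48000000>;
 *		cd-gpios = <&gpio6 27 0>;
 *		wp-gpios = <&gpio6 28 0>;
 *		ti,needs-special-reset;
 *		keep-power-in-suspend;
 *		enable-sdio-wakeup;
 *		vmmc-supply = <&vmmc1>;
 *	};
 */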
static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
{
	struct omap_mmc_platform_data *pdata;
	struct device_node *np = dev->of_node;
	u32 bus_width, max_freq;
	int cd_gpio, wp_gpio;

	cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
	wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
	if (cd_gpio == -EPROBE_DEFER || wp_gpio == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM); /* out of memory */

	if (of_find_property(np, "ti,dual-volt", NULL))
		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;

	/* This driver only supports 1 slot */
	pdata->nr_slots = 1;
	pdata->slots[0].switch_pin = cd_gpio;
	pdata->slots[0].gpio_wp = wp_gpio;

	if (of_find_property(np, "ti,non-removable", NULL)) {
		pdata->slots[0].nonremovable = true;
		pdata->slots[0].no_regulator_off_init = true;
	}
	of_property_read_u32(np, "bus-width", &bus_width);
	if (bus_width == 4)
		pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
	else if (bus_width == 8)
		pdata->slots[0].caps |= MMC_CAP_8_BIT_DATA;

	if (of_find_property(np, "ti,needs-special-reset", NULL))
		pdata->slots[0].features |= HSMMC_HAS_UPDATED_RESET;

	if (!of_property_read_u32(np, "max-frequency", &max_freq))
		pdata->max_freq = max_freq;

	if (of_find_property(np, "ti,needs-special-hs-handling", NULL))
		pdata->slots[0].features |= HSMMC_HAS_HSPE_SUPPORT;

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->slots[0].pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->slots[0].pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	return pdata;
}
#else
static inline struct omap_mmc_platform_data
			*of_get_hsmmc_pdata(struct device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int omap_hsmmc_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_host *mmc;
	struct omap_hsmmc_host *host = NULL;
	struct resource *res;
	int ret, irq;
	const struct of_device_id *match;
	dma_cap_mask_t mask;
	unsigned tx_req, rx_req;
	const struct omap_mmc_of_data *data;
	void __iomem *base;

	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
	if (match) {
		pdata = of_get_hsmmc_pdata(&pdev->dev);

		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		if (match->data) {
			data = match->data;
			pdata->reg_offset = data->reg_offset;
			pdata->controller_flags |= data->controller_flags;
		}
	}

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "No Slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ret = omap_hsmmc_gpio_init(pdata);
	if (ret)
		goto err;

	mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdata = pdata;
	host->dev = &pdev->dev;
	host->use_dma = 1;
	host->dma_ch = -1;
	host->irq = irq;
	host->slot_id = 0;
	host->mapbase = res->start + pdata->reg_offset;
	host->base = base + pdata->reg_offset;
	host->power_mode = MMC_POWER_OFF;
	host->next_data.cookie = 1;
	host->pbias_enabled = 0;

	platform_set_drvdata(pdev, host);

	if (pdev->dev.of_node)
		host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1);

	mmc->ops = &omap_hsmmc_ops;

	mmc->f_min = OMAP_MMC_MIN_CLOCK;

	if (pdata->max_freq > 0)
		mmc->f_max = pdata->max_freq;
	else
		mmc->f_max = OMAP_MMC_MAX_CLOCK;

	spin_lock_init(&host->irq_lock);

	host->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		goto err1;
	}
		host->fclk = NULL;
		goto err1;
	}

	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
		mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;
	}

	pm_runtime_enable(host->dev);
	pm_runtime_get_sync(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);

	omap_hsmmc_context_save(host);

	host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck");
	/*
	 * MMC can still work without the debounce clock.
	 */
	if (IS_ERR(host->dbclk)) {
		host->dbclk = NULL;
	} else if (clk_prepare_enable(host->dbclk) != 0) {
		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
		host->dbclk = NULL;
	}

	/*
	 * Since we do only SG emulation, we can have as many segments
	 * as we want.
	 */
	mmc->max_segs = 1024;

	mmc->max_blk_size = 512;	/* controller supports up to 1024-byte blocks */
	mmc->max_blk_count = 0xFFFF;	/* number of blocks is a 16-bit field */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;

	mmc->caps |= mmc_slot(host).caps;
	if (mmc->caps & MMC_CAP_8_BIT_DATA)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (mmc_slot(host).nonremovable)
		mmc->caps |= MMC_CAP_NONREMOVABLE;

	mmc->pm_caps = mmc_slot(host).pm_caps;

	omap_hsmmc_conf_bus_power(host);

	if (!pdev->dev.of_node) {
		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		tx_req = res->start;

		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
		if (!res) {
			dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
			ret = -ENXIO;
			goto err_irq;
		}
		rx_req = res->start;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->rx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &rx_req, &pdev->dev, "rx");

	if (!host->rx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel\n");
		ret = -ENXIO;
		goto err_irq;
	}

	host->tx_chan =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &tx_req, &pdev->dev, "tx");

	if (!host->tx_chan) {
		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel\n");
		ret = -ENXIO;
		goto err_irq;
	}

	/* Request IRQ for MMC operations */
	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
			       mmc_hostname(mmc), host);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
		goto err_irq;
	}

	if (pdata->init != NULL) {
		ret = pdata->init(&pdev->dev);
		if (ret) {
			dev_err(mmc_dev(host->mmc),
				"Unable to configure MMC IRQs\n");
			goto err_irq;
		}
	}

	if (omap_hsmmc_have_reg() && !mmc_slot(host).set_power) {
		ret = omap_hsmmc_reg_get(host);
		if (ret)
			goto err_reg;
		host->use_reg = 1;
	}

	mmc->ocr_avail = mmc_slot(host).ocr_mask;

	/* Request IRQ for card detect */
	if (mmc_slot(host).card_detect_irq) {
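		/*
		 * The card-detect handler may sleep (GPIO and MMC core
		 * calls), so request a threaded IRQ with no hard-IRQ half.
		 */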
		ret = devm_request_threaded_irq(&pdev->dev,
						mmc_slot(host).card_detect_irq,
						NULL, omap_hsmmc_detect,
						IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
						mmc_hostname(mmc), host);
		if (ret) {
			dev_err(mmc_dev(host->mmc),
				"Unable to grab MMC CD IRQ\n");
			goto err_irq_cd;
		}
		pdata->suspend = omap_hsmmc_suspend_cdirq;
		pdata->resume = omap_hsmmc_resume_cdirq;
	}

	omap_hsmmc_disable_irq(host);

	/*
	 * For now, only support SDIO interrupt if we have a separate
	 * wake-up interrupt configured from device tree. This is because
	 * the wake-up interrupt is needed for idle state and some
	 * platforms need special quirks. And we don't want to add new
	 * legacy mux platform init code callbacks any longer as we
	 * are moving to DT based booting anyway.
	 */
	ret = omap_hsmmc_configure_wake_irq(host);
	if (!ret)
		mmc->caps |= MMC_CAP_SDIO_IRQ;

	omap_hsmmc_protect_card(host);

	mmc_add_host(mmc);

	if (mmc_slot(host).name != NULL) {
		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
		if (ret < 0)
			goto err_slot_name;
	}
	if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) {
		ret = device_create_file(&mmc->class_dev,
					 &dev_attr_cover_switch);
		if (ret < 0)
			goto err_slot_name;
	}

	omap_hsmmc_debugfs(mmc);
	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);

	return 0;

err_slot_name:
	mmc_remove_host(mmc);
err_irq_cd:
	if (host->use_reg)
		omap_hsmmc_reg_put(host);
err_reg:
	if (host->pdata->cleanup)
		host->pdata->cleanup(&pdev->dev);
err_irq:
	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);
	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);
err1:
	mmc_free_host(mmc);
err_alloc:
	omap_hsmmc_gpio_free(pdata);
err:
	return ret;
}

static int omap_hsmmc_remove(struct platform_device *pdev)
{
	struct omap_hsmmc_host *host = platform_get_drvdata(pdev);

	pm_runtime_get_sync(host->dev);
	mmc_remove_host(host->mmc);
	if (host->use_reg)
		omap_hsmmc_reg_put(host);
	if (host->pdata->cleanup)
		host->pdata->cleanup(&pdev->dev);

	if (host->tx_chan)
		dma_release_channel(host->tx_chan);
	if (host->rx_chan)
		dma_release_channel(host->rx_chan);

	pm_runtime_put_sync(host->dev);
	pm_runtime_disable(host->dev);
	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	omap_hsmmc_gpio_free(host->pdata);
	mmc_free_host(host->mmc);

	return 0;
}

#ifdef CONFIG_PM
static int omap_hsmmc_prepare(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (host->pdata->suspend)
		return host->pdata->suspend(dev, host->slot_id);

	return 0;
}

static void omap_hsmmc_complete(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (host->pdata->resume)
		host->pdata->resume(dev, host->slot_id);
}

static int omap_hsmmc_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

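	/*
	 * Unless the card must stay powered over suspend (MMC_PM_KEEP_POWER),
	 * mask and clear all controller interrupts and drop SD bus power.
	 */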
	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, HCTL,
				 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
	}

	/* do not wake up due to sdio irq */
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
		disable_irq(host->wake_irq);

	if (host->dbclk)
		clk_disable_unprepare(host->dbclk);

	pm_runtime_put_sync(host->dev);
	return 0;
}

/* Routine to resume the MMC device */
static int omap_hsmmc_resume(struct device *dev)
{
	struct omap_hsmmc_host *host = dev_get_drvdata(dev);

	if (!host)
		return 0;

	pm_runtime_get_sync(host->dev);

	if (host->dbclk)
		clk_prepare_enable(host->dbclk);

	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
		omap_hsmmc_conf_bus_power(host);

	omap_hsmmc_protect_card(host);

	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
		enable_irq(host->wake_irq);

	pm_runtime_mark_last_busy(host->dev);
	pm_runtime_put_autosuspend(host->dev);
	return 0;
}

#else
#define omap_hsmmc_prepare	NULL
#define omap_hsmmc_complete	NULL
#define omap_hsmmc_suspend	NULL
#define omap_hsmmc_resume	NULL
#endif

static int omap_hsmmc_runtime_suspend(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;
	int ret = 0;

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_save(host);
	dev_dbg(dev, "disabled\n");

	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* disable sdio irq handling to prevent race */
		OMAP_HSMMC_WRITE(host->base, ISE, 0);
		OMAP_HSMMC_WRITE(host->base, IE, 0);

		if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) {
			/*
			 * DAT1 is low, so an SDIO interrupt is pending and
			 * its handler may already be running on another CPU;
			 * re-enable the card interrupt and abort the suspend.
			 */
			dev_dbg(dev, "pending sdio irq, abort suspend\n");
			OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
			OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
			OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
			pm_runtime_mark_last_busy(dev);
			ret = -EBUSY;
			goto abort;
		}

		pinctrl_pm_select_idle_state(dev);

		WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
		enable_irq(host->wake_irq);
		host->flags |= HSMMC_WAKE_IRQ_ENABLED;
	} else {
		pinctrl_pm_select_idle_state(dev);
	}

abort:
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return ret;
}

static int omap_hsmmc_runtime_resume(struct device *dev)
{
	struct omap_hsmmc_host *host;
	unsigned long flags;

	host = platform_get_drvdata(to_platform_device(dev));
	omap_hsmmc_context_restore(host);
	dev_dbg(dev, "enabled\n");

	spin_lock_irqsave(&host->irq_lock, flags);
	if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
	    (host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
		/* sdio irq flag can't change while in runtime suspend */
		if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
			disable_irq_nosync(host->wake_irq);
			host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
		}

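		/*
		 * Restore the default (active) pin state before re-arming
		 * the card interrupt in the controller below.
		 */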
		pinctrl_pm_select_default_state(host->dev);

		/* The card IRQ can be lost if the pinmux is incorrect, so re-enable it */
		OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
		OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN);
		OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN);
	} else {
		pinctrl_pm_select_default_state(host->dev);
	}
	spin_unlock_irqrestore(&host->irq_lock, flags);
	return 0;
}

static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
	.suspend	= omap_hsmmc_suspend,
	.resume		= omap_hsmmc_resume,
	.prepare	= omap_hsmmc_prepare,
	.complete	= omap_hsmmc_complete,
	.runtime_suspend = omap_hsmmc_runtime_suspend,
	.runtime_resume = omap_hsmmc_runtime_resume,
};

static struct platform_driver omap_hsmmc_driver = {
	.probe		= omap_hsmmc_probe,
	.remove		= omap_hsmmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm = &omap_hsmmc_dev_pm_ops,
		.of_match_table = of_match_ptr(omap_mmc_of_match),
	},
};

module_platform_driver(omap_hsmmc_driver);
MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");