1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface 3 * 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 5 * 6 * Thanks to the following companies for their support: 7 * 8 * - JMicron (hardware and technical support) 9 */ 10 11 #include <linux/bitfield.h> 12 #include <linux/string.h> 13 #include <linux/delay.h> 14 #include <linux/highmem.h> 15 #include <linux/module.h> 16 #include <linux/pci.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/slab.h> 19 #include <linux/device.h> 20 #include <linux/scatterlist.h> 21 #include <linux/io.h> 22 #include <linux/iopoll.h> 23 #include <linux/gpio.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/pm_qos.h> 26 #include <linux/debugfs.h> 27 #include <linux/acpi.h> 28 #include <linux/dmi.h> 29 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/mmc.h> 32 #include <linux/mmc/slot-gpio.h> 33 34 #ifdef CONFIG_X86 35 #include <asm/iosf_mbi.h> 36 #endif 37 38 #include "cqhci.h" 39 40 #include "sdhci.h" 41 #include "sdhci-cqhci.h" 42 #include "sdhci-pci.h" 43 44 static void sdhci_pci_hw_reset(struct sdhci_host *host); 45 46 #ifdef CONFIG_PM_SLEEP 47 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip) 48 { 49 mmc_pm_flag_t pm_flags = 0; 50 bool cap_cd_wake = false; 51 int i; 52 53 for (i = 0; i < chip->num_slots; i++) { 54 struct sdhci_pci_slot *slot = chip->slots[i]; 55 56 if (slot) { 57 pm_flags |= slot->host->mmc->pm_flags; 58 if (slot->host->mmc->caps & MMC_CAP_CD_WAKE) 59 cap_cd_wake = true; 60 } 61 } 62 63 if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ)) 64 return device_wakeup_enable(&chip->pdev->dev); 65 else if (!cap_cd_wake) 66 return device_wakeup_disable(&chip->pdev->dev); 67 68 return 0; 69 } 70 71 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip) 72 { 73 int i, ret; 74 75 sdhci_pci_init_wakeup(chip); 76 77 for (i = 0; i < chip->num_slots; i++) { 78 struct sdhci_pci_slot *slot = chip->slots[i]; 79 struct sdhci_host *host; 80 81 if (!slot) 82 continue; 83 84 host = slot->host; 85 86 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3) 87 mmc_retune_needed(host->mmc); 88 89 ret = sdhci_suspend_host(host); 90 if (ret) 91 goto err_pci_suspend; 92 93 if (device_may_wakeup(&chip->pdev->dev)) 94 mmc_gpio_set_cd_wake(host->mmc, true); 95 } 96 97 return 0; 98 99 err_pci_suspend: 100 while (--i >= 0) 101 sdhci_resume_host(chip->slots[i]->host); 102 return ret; 103 } 104 105 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip) 106 { 107 struct sdhci_pci_slot *slot; 108 int i, ret; 109 110 for (i = 0; i < chip->num_slots; i++) { 111 slot = chip->slots[i]; 112 if (!slot) 113 continue; 114 115 ret = sdhci_resume_host(slot->host); 116 if (ret) 117 return ret; 118 119 mmc_gpio_set_cd_wake(slot->host->mmc, false); 120 } 121 122 return 0; 123 } 124 125 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip) 126 { 127 int ret; 128 129 ret = cqhci_suspend(chip->slots[0]->host->mmc); 130 if (ret) 131 return ret; 132 133 return sdhci_pci_suspend_host(chip); 134 } 135 136 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip) 137 { 138 int ret; 139 140 ret = sdhci_pci_resume_host(chip); 141 if (ret) 142 return ret; 143 144 return cqhci_resume(chip->slots[0]->host->mmc); 145 } 146 #endif 147 148 #ifdef CONFIG_PM 149 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip) 150 { 151 struct sdhci_pci_slot *slot; 152 struct sdhci_host *host; 153 int i, ret; 154 155 for (i = 0; i < chip->num_slots; i++) 
{ 156 slot = chip->slots[i]; 157 if (!slot) 158 continue; 159 160 host = slot->host; 161 162 ret = sdhci_runtime_suspend_host(host); 163 if (ret) 164 goto err_pci_runtime_suspend; 165 166 if (chip->rpm_retune && 167 host->tuning_mode != SDHCI_TUNING_MODE_3) 168 mmc_retune_needed(host->mmc); 169 } 170 171 return 0; 172 173 err_pci_runtime_suspend: 174 while (--i >= 0) 175 sdhci_runtime_resume_host(chip->slots[i]->host, 0); 176 return ret; 177 } 178 179 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip) 180 { 181 struct sdhci_pci_slot *slot; 182 int i, ret; 183 184 for (i = 0; i < chip->num_slots; i++) { 185 slot = chip->slots[i]; 186 if (!slot) 187 continue; 188 189 ret = sdhci_runtime_resume_host(slot->host, 0); 190 if (ret) 191 return ret; 192 } 193 194 return 0; 195 } 196 197 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip) 198 { 199 int ret; 200 201 ret = cqhci_suspend(chip->slots[0]->host->mmc); 202 if (ret) 203 return ret; 204 205 return sdhci_pci_runtime_suspend_host(chip); 206 } 207 208 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip) 209 { 210 int ret; 211 212 ret = sdhci_pci_runtime_resume_host(chip); 213 if (ret) 214 return ret; 215 216 return cqhci_resume(chip->slots[0]->host->mmc); 217 } 218 #endif 219 220 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask) 221 { 222 int cmd_error = 0; 223 int data_error = 0; 224 225 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) 226 return intmask; 227 228 cqhci_irq(host->mmc, intmask, cmd_error, data_error); 229 230 return 0; 231 } 232 233 static void sdhci_pci_dumpregs(struct mmc_host *mmc) 234 { 235 sdhci_dumpregs(mmc_priv(mmc)); 236 } 237 238 /*****************************************************************************\ 239 * * 240 * Hardware specific quirk handling * 241 * * 242 \*****************************************************************************/ 243 244 static int ricoh_probe(struct sdhci_pci_chip *chip) 245 { 246 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG || 247 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY) 248 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; 249 return 0; 250 } 251 252 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot) 253 { 254 u32 caps = 255 FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) | 256 FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) | 257 SDHCI_TIMEOUT_CLK_UNIT | 258 SDHCI_CAN_VDD_330 | 259 SDHCI_CAN_DO_HISPD | 260 SDHCI_CAN_DO_SDMA; 261 u32 caps1 = 0; 262 263 __sdhci_read_caps(slot->host, NULL, &caps, &caps1); 264 return 0; 265 } 266 267 #ifdef CONFIG_PM_SLEEP 268 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip) 269 { 270 /* Apply a delay to allow controller to settle */ 271 /* Otherwise it becomes confused if card state changed 272 during suspend */ 273 msleep(500); 274 return sdhci_pci_resume_host(chip); 275 } 276 #endif 277 278 static const struct sdhci_pci_fixes sdhci_ricoh = { 279 .probe = ricoh_probe, 280 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 281 SDHCI_QUIRK_FORCE_DMA | 282 SDHCI_QUIRK_CLOCK_BEFORE_RESET, 283 }; 284 285 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = { 286 .probe_slot = ricoh_mmc_probe_slot, 287 #ifdef CONFIG_PM_SLEEP 288 .resume = ricoh_mmc_resume, 289 #endif 290 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 291 SDHCI_QUIRK_CLOCK_BEFORE_RESET | 292 SDHCI_QUIRK_NO_CARD_NO_RESET, 293 }; 294 295 static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 296 { 297 struct sdhci_host *host = mmc_priv(mmc); 298 299 sdhci_set_ios(mmc, ios); 300 301 /* 302 * Some 
(ENE) controllers misbehave on some ios operations, 303 * signalling timeout and CRC errors even on CMD0. Resetting 304 * it on each ios seems to solve the problem. 305 */ 306 if (!(host->flags & SDHCI_DEVICE_DEAD)) 307 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 308 } 309 310 static int ene_714_probe_slot(struct sdhci_pci_slot *slot) 311 { 312 slot->host->mmc_host_ops.set_ios = ene_714_set_ios; 313 return 0; 314 } 315 316 static const struct sdhci_pci_fixes sdhci_ene_712 = { 317 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | 318 SDHCI_QUIRK_BROKEN_DMA, 319 }; 320 321 static const struct sdhci_pci_fixes sdhci_ene_714 = { 322 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | 323 SDHCI_QUIRK_BROKEN_DMA, 324 .probe_slot = ene_714_probe_slot, 325 }; 326 327 static const struct sdhci_pci_fixes sdhci_cafe = { 328 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | 329 SDHCI_QUIRK_NO_BUSY_IRQ | 330 SDHCI_QUIRK_BROKEN_CARD_DETECTION | 331 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, 332 }; 333 334 static const struct sdhci_pci_fixes sdhci_intel_qrk = { 335 .quirks = SDHCI_QUIRK_NO_HISPD_BIT, 336 }; 337 338 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) 339 { 340 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; 341 return 0; 342 } 343 344 /* 345 * ADMA operation is disabled for Moorestown platform due to 346 * hardware bugs. 347 */ 348 static int mrst_hc_probe(struct sdhci_pci_chip *chip) 349 { 350 /* 351 * slots number is fixed here for MRST as SDIO3/5 are never used and 352 * have hardware bugs. 353 */ 354 chip->num_slots = 1; 355 return 0; 356 } 357 358 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) 359 { 360 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; 361 return 0; 362 } 363 364 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) 365 { 366 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; 367 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC; 368 return 0; 369 } 370 371 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) 372 { 373 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; 374 return 0; 375 } 376 377 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { 378 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, 379 .probe_slot = mrst_hc_probe_slot, 380 }; 381 382 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { 383 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, 384 .probe = mrst_hc_probe, 385 }; 386 387 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { 388 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 389 .allow_runtime_pm = true, 390 .own_cd_for_runtime_pm = true, 391 }; 392 393 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { 394 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 395 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, 396 .allow_runtime_pm = true, 397 .probe_slot = mfd_sdio_probe_slot, 398 }; 399 400 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { 401 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 402 .allow_runtime_pm = true, 403 .probe_slot = mfd_emmc_probe_slot, 404 }; 405 406 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { 407 .quirks = SDHCI_QUIRK_BROKEN_ADMA, 408 .probe_slot = pch_hc_probe_slot, 409 }; 410 411 #ifdef CONFIG_X86 412 413 #define BYT_IOSF_SCCEP 0x63 414 #define BYT_IOSF_OCP_NETCTRL0 0x1078 415 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) 416 417 static void byt_ocp_setting(struct pci_dev *pdev) 418 { 419 u32 val = 0; 420 421 if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC && 422 pdev->device != 
PCI_DEVICE_ID_INTEL_BYT_SDIO && 423 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD && 424 pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2) 425 return; 426 427 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0, 428 &val)) { 429 dev_err(&pdev->dev, "%s read error\n", __func__); 430 return; 431 } 432 433 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) 434 return; 435 436 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; 437 438 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0, 439 val)) { 440 dev_err(&pdev->dev, "%s write error\n", __func__); 441 return; 442 } 443 444 dev_dbg(&pdev->dev, "%s completed\n", __func__); 445 } 446 447 #else 448 449 static inline void byt_ocp_setting(struct pci_dev *pdev) 450 { 451 } 452 453 #endif 454 455 enum { 456 INTEL_DSM_FNS = 0, 457 INTEL_DSM_V18_SWITCH = 3, 458 INTEL_DSM_V33_SWITCH = 4, 459 INTEL_DSM_DRV_STRENGTH = 9, 460 INTEL_DSM_D3_RETUNE = 10, 461 }; 462 463 struct intel_host { 464 u32 dsm_fns; 465 int drv_strength; 466 bool d3_retune; 467 bool rpm_retune_ok; 468 bool needs_pwr_off; 469 u32 glk_rx_ctrl1; 470 u32 glk_tun_val; 471 u32 active_ltr; 472 u32 idle_ltr; 473 }; 474 475 static const guid_t intel_dsm_guid = 476 GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F, 477 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61); 478 479 static int __intel_dsm(struct intel_host *intel_host, struct device *dev, 480 unsigned int fn, u32 *result) 481 { 482 union acpi_object *obj; 483 int err = 0; 484 size_t len; 485 486 obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); 487 if (!obj) 488 return -EOPNOTSUPP; 489 490 if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) { 491 err = -EINVAL; 492 goto out; 493 } 494 495 len = min_t(size_t, obj->buffer.length, 4); 496 497 *result = 0; 498 memcpy(result, obj->buffer.pointer, len); 499 out: 500 ACPI_FREE(obj); 501 502 return err; 503 } 504 505 static int intel_dsm(struct intel_host *intel_host, struct device *dev, 506 unsigned int fn, u32 *result) 507 { 508 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) 509 return -EOPNOTSUPP; 510 511 return __intel_dsm(intel_host, dev, fn, result); 512 } 513 514 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, 515 struct mmc_host *mmc) 516 { 517 int err; 518 u32 val; 519 520 intel_host->d3_retune = true; 521 522 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); 523 if (err) { 524 pr_debug("%s: DSM not supported, error %d\n", 525 mmc_hostname(mmc), err); 526 return; 527 } 528 529 pr_debug("%s: DSM function mask %#x\n", 530 mmc_hostname(mmc), intel_host->dsm_fns); 531 532 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val); 533 intel_host->drv_strength = err ? 0 : val; 534 535 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val); 536 intel_host->d3_retune = err ? 
true : !!val; 537 } 538 539 static void sdhci_pci_int_hw_reset(struct sdhci_host *host) 540 { 541 u8 reg; 542 543 reg = sdhci_readb(host, SDHCI_POWER_CONTROL); 544 reg |= 0x10; 545 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 546 /* For eMMC, minimum is 1us but give it 9us for good measure */ 547 udelay(9); 548 reg &= ~0x10; 549 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 550 /* For eMMC, minimum is 200us but give it 300us for good measure */ 551 usleep_range(300, 1000); 552 } 553 554 static int intel_select_drive_strength(struct mmc_card *card, 555 unsigned int max_dtr, int host_drv, 556 int card_drv, int *drv_type) 557 { 558 struct sdhci_host *host = mmc_priv(card->host); 559 struct sdhci_pci_slot *slot = sdhci_priv(host); 560 struct intel_host *intel_host = sdhci_pci_priv(slot); 561 562 if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv)) 563 return 0; 564 565 return intel_host->drv_strength; 566 } 567 568 static int bxt_get_cd(struct mmc_host *mmc) 569 { 570 int gpio_cd = mmc_gpio_get_cd(mmc); 571 572 if (!gpio_cd) 573 return 0; 574 575 return sdhci_get_cd_nogpio(mmc); 576 } 577 578 static int mrfld_get_cd(struct mmc_host *mmc) 579 { 580 return sdhci_get_cd_nogpio(mmc); 581 } 582 583 #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20 584 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100 585 586 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, 587 unsigned short vdd) 588 { 589 struct sdhci_pci_slot *slot = sdhci_priv(host); 590 struct intel_host *intel_host = sdhci_pci_priv(slot); 591 int cntr; 592 u8 reg; 593 594 /* 595 * Bus power may control card power, but a full reset still may not 596 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can. 597 * That might be needed to initialize correctly, if the card was left 598 * powered on previously. 599 */ 600 if (intel_host->needs_pwr_off) { 601 intel_host->needs_pwr_off = false; 602 if (mode != MMC_POWER_OFF) { 603 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 604 usleep_range(10000, 12500); 605 } 606 } 607 608 sdhci_set_power(host, mode, vdd); 609 610 if (mode == MMC_POWER_OFF) 611 return; 612 613 /* 614 * Bus power might not enable after D3 -> D0 transition due to the 615 * present state not yet having propagated. Retry for up to 2ms. 
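	 * That is SDHCI_INTEL_PWR_TIMEOUT_CNT polls of
	 * SDHCI_INTEL_PWR_TIMEOUT_UDELAY each (20 x 100us).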
616 */ 617 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) { 618 reg = sdhci_readb(host, SDHCI_POWER_CONTROL); 619 if (reg & SDHCI_POWER_ON) 620 break; 621 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY); 622 reg |= SDHCI_POWER_ON; 623 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 624 } 625 } 626 627 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host, 628 unsigned int timing) 629 { 630 /* Set UHS timing to SDR25 for High Speed mode */ 631 if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS) 632 timing = MMC_TIMING_UHS_SDR25; 633 sdhci_set_uhs_signaling(host, timing); 634 } 635 636 #define INTEL_HS400_ES_REG 0x78 637 #define INTEL_HS400_ES_BIT BIT(0) 638 639 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, 640 struct mmc_ios *ios) 641 { 642 struct sdhci_host *host = mmc_priv(mmc); 643 u32 val; 644 645 val = sdhci_readl(host, INTEL_HS400_ES_REG); 646 if (ios->enhanced_strobe) 647 val |= INTEL_HS400_ES_BIT; 648 else 649 val &= ~INTEL_HS400_ES_BIT; 650 sdhci_writel(host, val, INTEL_HS400_ES_REG); 651 } 652 653 static int intel_start_signal_voltage_switch(struct mmc_host *mmc, 654 struct mmc_ios *ios) 655 { 656 struct device *dev = mmc_dev(mmc); 657 struct sdhci_host *host = mmc_priv(mmc); 658 struct sdhci_pci_slot *slot = sdhci_priv(host); 659 struct intel_host *intel_host = sdhci_pci_priv(slot); 660 unsigned int fn; 661 u32 result = 0; 662 int err; 663 664 err = sdhci_start_signal_voltage_switch(mmc, ios); 665 if (err) 666 return err; 667 668 switch (ios->signal_voltage) { 669 case MMC_SIGNAL_VOLTAGE_330: 670 fn = INTEL_DSM_V33_SWITCH; 671 break; 672 case MMC_SIGNAL_VOLTAGE_180: 673 fn = INTEL_DSM_V18_SWITCH; 674 break; 675 default: 676 return 0; 677 } 678 679 err = intel_dsm(intel_host, dev, fn, &result); 680 pr_debug("%s: %s DSM fn %u error %d result %u\n", 681 mmc_hostname(mmc), __func__, fn, err, result); 682 683 return 0; 684 } 685 686 static const struct sdhci_ops sdhci_intel_byt_ops = { 687 .set_clock = sdhci_set_clock, 688 .set_power = sdhci_intel_set_power, 689 .enable_dma = sdhci_pci_enable_dma, 690 .set_bus_width = sdhci_set_bus_width, 691 .reset = sdhci_reset, 692 .set_uhs_signaling = sdhci_intel_set_uhs_signaling, 693 .hw_reset = sdhci_pci_hw_reset, 694 }; 695 696 static const struct sdhci_ops sdhci_intel_glk_ops = { 697 .set_clock = sdhci_set_clock, 698 .set_power = sdhci_intel_set_power, 699 .enable_dma = sdhci_pci_enable_dma, 700 .set_bus_width = sdhci_set_bus_width, 701 .reset = sdhci_and_cqhci_reset, 702 .set_uhs_signaling = sdhci_intel_set_uhs_signaling, 703 .hw_reset = sdhci_pci_hw_reset, 704 .irq = sdhci_cqhci_irq, 705 }; 706 707 static void byt_read_dsm(struct sdhci_pci_slot *slot) 708 { 709 struct intel_host *intel_host = sdhci_pci_priv(slot); 710 struct device *dev = &slot->chip->pdev->dev; 711 struct mmc_host *mmc = slot->host->mmc; 712 713 intel_dsm_init(intel_host, dev, mmc); 714 slot->chip->rpm_retune = intel_host->d3_retune; 715 } 716 717 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) 718 { 719 int err = sdhci_execute_tuning(mmc, opcode); 720 struct sdhci_host *host = mmc_priv(mmc); 721 722 if (err) 723 return err; 724 725 /* 726 * Tuning can leave the IP in an active state (Buffer Read Enable bit 727 * set) which prevents the entry to low power states (i.e. S0i3). Data 728 * reset will clear it. 
	 */
	sdhci_reset(host, SDHCI_RESET_DATA);

	return 0;
}

#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

static void intel_cache_ltr(struct sdhci_pci_slot *slot)
{
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct sdhci_host *host = slot->host;

	intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
	intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
	struct sdhci_pci_slot *slot = chip->slots[0];
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct sdhci_host *host = slot->host;
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program the latency tolerance (LTR) according to what the PM QoS
	 * layer has requested, or disable it if we were passed a negative
	 * value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(host->ioaddr + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == intel_host->active_ltr)
		goto out;

	writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
	writel(ltr, host->ioaddr + INTEL_IDLELTR);

	/* Cache the values into the intel_host structure */
	intel_cache_ltr(slot);
out:
	pm_runtime_put_autosuspend(dev);
}

static bool intel_use_ltr(struct sdhci_pci_chip *chip)
{
	switch (chip->pdev->device) {
	case PCI_DEVICE_ID_INTEL_BYT_EMMC:
	case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
	case PCI_DEVICE_ID_INTEL_BYT_SD:
	case PCI_DEVICE_ID_INTEL_BSW_EMMC:
	case PCI_DEVICE_ID_INTEL_BSW_SDIO:
	case PCI_DEVICE_ID_INTEL_BSW_SD:
		return false;
	default:
		return true;
	}
}

static void intel_ltr_expose(struct sdhci_pci_chip *chip)
{
	struct device *dev = &chip->pdev->dev;

	if (!intel_use_ltr(chip))
		return;

	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct sdhci_pci_chip *chip)
{
	struct device *dev = &chip->pdev->dev;

	if (!intel_use_ltr(chip))
		return;

	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

static void byt_probe_slot(struct sdhci_pci_slot *slot)
{
	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
	struct device *dev = &slot->chip->pdev->dev;
	struct mmc_host *mmc = slot->host->mmc;

	byt_read_dsm(slot);

	byt_ocp_setting(slot->chip->pdev);

	ops->execute_tuning = intel_execute_tuning;
	ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;

	device_property_read_u32(dev, "max-frequency", &mmc->f_max);

	if (!mmc->slotno) {
		slot->chip->slots[mmc->slotno] = slot;
		intel_ltr_expose(slot->chip);
	}
}

static void byt_add_debugfs(struct
sdhci_pci_slot *slot) 859 { 860 struct intel_host *intel_host = sdhci_pci_priv(slot); 861 struct mmc_host *mmc = slot->host->mmc; 862 struct dentry *dir = mmc->debugfs_root; 863 864 if (!intel_use_ltr(slot->chip)) 865 return; 866 867 debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr); 868 debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr); 869 870 intel_cache_ltr(slot); 871 } 872 873 static int byt_add_host(struct sdhci_pci_slot *slot) 874 { 875 int ret = sdhci_add_host(slot->host); 876 877 if (!ret) 878 byt_add_debugfs(slot); 879 return ret; 880 } 881 882 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead) 883 { 884 struct mmc_host *mmc = slot->host->mmc; 885 886 if (!mmc->slotno) 887 intel_ltr_hide(slot->chip); 888 } 889 890 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 891 { 892 byt_probe_slot(slot); 893 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 894 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | 895 MMC_CAP_CMD_DURING_TFR | 896 MMC_CAP_WAIT_WHILE_BUSY; 897 slot->hw_reset = sdhci_pci_int_hw_reset; 898 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC) 899 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */ 900 slot->host->mmc_host_ops.select_drive_strength = 901 intel_select_drive_strength; 902 return 0; 903 } 904 905 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) 906 { 907 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && 908 (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || 909 dmi_match(DMI_SYS_VENDOR, "IRBIS")); 910 } 911 912 static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) 913 { 914 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC && 915 dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC."); 916 } 917 918 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) 919 { 920 int ret = byt_emmc_probe_slot(slot); 921 922 if (!glk_broken_cqhci(slot)) 923 slot->host->mmc->caps2 |= MMC_CAP2_CQE; 924 925 if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { 926 if (!jsl_broken_hs400es(slot)) { 927 slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES; 928 slot->host->mmc_host_ops.hs400_enhanced_strobe = 929 intel_hs400_enhanced_strobe; 930 } 931 slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; 932 } 933 934 return ret; 935 } 936 937 static const struct cqhci_host_ops glk_cqhci_ops = { 938 .enable = sdhci_cqe_enable, 939 .disable = sdhci_cqe_disable, 940 .dumpregs = sdhci_pci_dumpregs, 941 }; 942 943 static int glk_emmc_add_host(struct sdhci_pci_slot *slot) 944 { 945 struct device *dev = &slot->chip->pdev->dev; 946 struct sdhci_host *host = slot->host; 947 struct cqhci_host *cq_host; 948 bool dma64; 949 int ret; 950 951 ret = sdhci_setup_host(host); 952 if (ret) 953 return ret; 954 955 cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL); 956 if (!cq_host) { 957 ret = -ENOMEM; 958 goto cleanup; 959 } 960 961 cq_host->mmio = host->ioaddr + 0x200; 962 cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; 963 cq_host->ops = &glk_cqhci_ops; 964 965 dma64 = host->flags & SDHCI_USE_64_BIT_DMA; 966 if (dma64) 967 cq_host->caps |= CQHCI_TASK_DESC_SZ_128; 968 969 ret = cqhci_init(cq_host, host->mmc, dma64); 970 if (ret) 971 goto cleanup; 972 973 ret = __sdhci_add_host(host); 974 if (ret) 975 goto cleanup; 976 977 byt_add_debugfs(slot); 978 979 return 0; 980 981 cleanup: 982 sdhci_cleanup_host(host); 983 return ret; 984 } 985 986 #ifdef CONFIG_PM 987 #define GLK_RX_CTRL1 0x834 988 #define GLK_TUN_VAL 0x840 989 #define GLK_PATH_PLL GENMASK(13, 8) 990 #define GLK_DLY 
GENMASK(6, 0) 991 /* Workaround firmware failing to restore the tuning value */ 992 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp) 993 { 994 struct sdhci_pci_slot *slot = chip->slots[0]; 995 struct intel_host *intel_host = sdhci_pci_priv(slot); 996 struct sdhci_host *host = slot->host; 997 u32 glk_rx_ctrl1; 998 u32 glk_tun_val; 999 u32 dly; 1000 1001 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc)) 1002 return; 1003 1004 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1); 1005 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL); 1006 1007 if (susp) { 1008 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1; 1009 intel_host->glk_tun_val = glk_tun_val; 1010 return; 1011 } 1012 1013 if (!intel_host->glk_tun_val) 1014 return; 1015 1016 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) { 1017 intel_host->rpm_retune_ok = true; 1018 return; 1019 } 1020 1021 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) + 1022 (intel_host->glk_tun_val << 1)); 1023 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1)) 1024 return; 1025 1026 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly; 1027 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1); 1028 1029 intel_host->rpm_retune_ok = true; 1030 chip->rpm_retune = true; 1031 mmc_retune_needed(host->mmc); 1032 pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc)); 1033 } 1034 1035 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp) 1036 { 1037 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && 1038 !chip->rpm_retune) 1039 glk_rpm_retune_wa(chip, susp); 1040 } 1041 1042 static int glk_runtime_suspend(struct sdhci_pci_chip *chip) 1043 { 1044 glk_rpm_retune_chk(chip, true); 1045 1046 return sdhci_cqhci_runtime_suspend(chip); 1047 } 1048 1049 static int glk_runtime_resume(struct sdhci_pci_chip *chip) 1050 { 1051 glk_rpm_retune_chk(chip, false); 1052 1053 return sdhci_cqhci_runtime_resume(chip); 1054 } 1055 #endif 1056 1057 #ifdef CONFIG_ACPI 1058 static int ni_set_max_freq(struct sdhci_pci_slot *slot) 1059 { 1060 acpi_status status; 1061 unsigned long long max_freq; 1062 1063 status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev), 1064 "MXFQ", NULL, &max_freq); 1065 if (ACPI_FAILURE(status)) { 1066 dev_err(&slot->chip->pdev->dev, 1067 "MXFQ not found in acpi table\n"); 1068 return -EINVAL; 1069 } 1070 1071 slot->host->mmc->f_max = max_freq * 1000000; 1072 1073 return 0; 1074 } 1075 #else 1076 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot) 1077 { 1078 return 0; 1079 } 1080 #endif 1081 1082 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) 1083 { 1084 int err; 1085 1086 byt_probe_slot(slot); 1087 1088 err = ni_set_max_freq(slot); 1089 if (err) 1090 return err; 1091 1092 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | 1093 MMC_CAP_WAIT_WHILE_BUSY; 1094 return 0; 1095 } 1096 1097 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) 1098 { 1099 byt_probe_slot(slot); 1100 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | 1101 MMC_CAP_WAIT_WHILE_BUSY; 1102 return 0; 1103 } 1104 1105 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot) 1106 { 1107 struct intel_host *intel_host = sdhci_pci_priv(slot); 1108 u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL); 1109 1110 intel_host->needs_pwr_off = reg & SDHCI_POWER_ON; 1111 } 1112 1113 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) 1114 { 1115 byt_probe_slot(slot); 1116 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | 1117 MMC_CAP_AGGRESSIVE_PM | 
MMC_CAP_CD_WAKE; 1118 slot->cd_idx = 0; 1119 slot->cd_override_level = true; 1120 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || 1121 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD || 1122 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD || 1123 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD) 1124 slot->host->mmc_host_ops.get_cd = bxt_get_cd; 1125 1126 if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI && 1127 slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3) 1128 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V; 1129 1130 byt_needs_pwr_off(slot); 1131 1132 return 0; 1133 } 1134 1135 #ifdef CONFIG_PM_SLEEP 1136 1137 static int byt_resume(struct sdhci_pci_chip *chip) 1138 { 1139 byt_ocp_setting(chip->pdev); 1140 1141 return sdhci_pci_resume_host(chip); 1142 } 1143 1144 #endif 1145 1146 #ifdef CONFIG_PM 1147 1148 static int byt_runtime_resume(struct sdhci_pci_chip *chip) 1149 { 1150 byt_ocp_setting(chip->pdev); 1151 1152 return sdhci_pci_runtime_resume_host(chip); 1153 } 1154 1155 #endif 1156 1157 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { 1158 #ifdef CONFIG_PM_SLEEP 1159 .resume = byt_resume, 1160 #endif 1161 #ifdef CONFIG_PM 1162 .runtime_resume = byt_runtime_resume, 1163 #endif 1164 .allow_runtime_pm = true, 1165 .probe_slot = byt_emmc_probe_slot, 1166 .add_host = byt_add_host, 1167 .remove_slot = byt_remove_slot, 1168 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1169 SDHCI_QUIRK_NO_LED, 1170 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1171 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | 1172 SDHCI_QUIRK2_STOP_WITH_TC, 1173 .ops = &sdhci_intel_byt_ops, 1174 .priv_size = sizeof(struct intel_host), 1175 }; 1176 1177 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { 1178 .allow_runtime_pm = true, 1179 .probe_slot = glk_emmc_probe_slot, 1180 .add_host = glk_emmc_add_host, 1181 .remove_slot = byt_remove_slot, 1182 #ifdef CONFIG_PM_SLEEP 1183 .suspend = sdhci_cqhci_suspend, 1184 .resume = sdhci_cqhci_resume, 1185 #endif 1186 #ifdef CONFIG_PM 1187 .runtime_suspend = glk_runtime_suspend, 1188 .runtime_resume = glk_runtime_resume, 1189 #endif 1190 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1191 SDHCI_QUIRK_NO_LED, 1192 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1193 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | 1194 SDHCI_QUIRK2_STOP_WITH_TC, 1195 .ops = &sdhci_intel_glk_ops, 1196 .priv_size = sizeof(struct intel_host), 1197 }; 1198 1199 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = { 1200 #ifdef CONFIG_PM_SLEEP 1201 .resume = byt_resume, 1202 #endif 1203 #ifdef CONFIG_PM 1204 .runtime_resume = byt_runtime_resume, 1205 #endif 1206 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1207 SDHCI_QUIRK_NO_LED, 1208 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON | 1209 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1210 .allow_runtime_pm = true, 1211 .probe_slot = ni_byt_sdio_probe_slot, 1212 .add_host = byt_add_host, 1213 .remove_slot = byt_remove_slot, 1214 .ops = &sdhci_intel_byt_ops, 1215 .priv_size = sizeof(struct intel_host), 1216 }; 1217 1218 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { 1219 #ifdef CONFIG_PM_SLEEP 1220 .resume = byt_resume, 1221 #endif 1222 #ifdef CONFIG_PM 1223 .runtime_resume = byt_runtime_resume, 1224 #endif 1225 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1226 SDHCI_QUIRK_NO_LED, 1227 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON | 1228 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1229 .allow_runtime_pm = true, 1230 .probe_slot = byt_sdio_probe_slot, 1231 .add_host = byt_add_host, 1232 .remove_slot = 
byt_remove_slot, 1233 .ops = &sdhci_intel_byt_ops, 1234 .priv_size = sizeof(struct intel_host), 1235 }; 1236 1237 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { 1238 #ifdef CONFIG_PM_SLEEP 1239 .resume = byt_resume, 1240 #endif 1241 #ifdef CONFIG_PM 1242 .runtime_resume = byt_runtime_resume, 1243 #endif 1244 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1245 SDHCI_QUIRK_NO_LED, 1246 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | 1247 SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1248 SDHCI_QUIRK2_STOP_WITH_TC, 1249 .allow_runtime_pm = true, 1250 .own_cd_for_runtime_pm = true, 1251 .probe_slot = byt_sd_probe_slot, 1252 .add_host = byt_add_host, 1253 .remove_slot = byt_remove_slot, 1254 .ops = &sdhci_intel_byt_ops, 1255 .priv_size = sizeof(struct intel_host), 1256 }; 1257 1258 /* Define Host controllers for Intel Merrifield platform */ 1259 #define INTEL_MRFLD_EMMC_0 0 1260 #define INTEL_MRFLD_EMMC_1 1 1261 #define INTEL_MRFLD_SD 2 1262 #define INTEL_MRFLD_SDIO 3 1263 1264 #ifdef CONFIG_ACPI 1265 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) 1266 { 1267 struct acpi_device *device; 1268 1269 device = ACPI_COMPANION(&slot->chip->pdev->dev); 1270 if (device) 1271 acpi_device_fix_up_power_extended(device); 1272 } 1273 #else 1274 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {} 1275 #endif 1276 1277 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) 1278 { 1279 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn); 1280 1281 switch (func) { 1282 case INTEL_MRFLD_EMMC_0: 1283 case INTEL_MRFLD_EMMC_1: 1284 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | 1285 MMC_CAP_8_BIT_DATA | 1286 MMC_CAP_1_8V_DDR; 1287 break; 1288 case INTEL_MRFLD_SD: 1289 slot->cd_idx = 0; 1290 slot->cd_override_level = true; 1291 /* 1292 * There are two PCB designs of SD card slot with the opposite 1293 * card detection sense. Quirk this out by ignoring GPIO state 1294 * completely in the custom ->get_cd() callback. 1295 */ 1296 slot->host->mmc_host_ops.get_cd = mrfld_get_cd; 1297 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; 1298 break; 1299 case INTEL_MRFLD_SDIO: 1300 /* Advertise 2.0v for compatibility with the SDIO card's OCR */ 1301 slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195; 1302 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | 1303 MMC_CAP_POWER_OFF_CARD; 1304 break; 1305 default: 1306 return -ENODEV; 1307 } 1308 1309 intel_mrfld_mmc_fix_up_power_slot(slot); 1310 return 0; 1311 } 1312 1313 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = { 1314 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 1315 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | 1316 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1317 .allow_runtime_pm = true, 1318 .probe_slot = intel_mrfld_mmc_probe_slot, 1319 }; 1320 1321 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) 1322 { 1323 u8 scratch; 1324 int ret; 1325 1326 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); 1327 if (ret) 1328 return ret; 1329 1330 /* 1331 * Turn PMOS on [bit 0], set over current detection to 2.4 V 1332 * [bit 1:2] and enable over current debouncing [bit 6]. 
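	 * Together these bits form the 0x47 mask applied below.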
	 */
	if (on)
		scratch |= 0x47;
	else
		scratch &= ~0x47;

	return pci_write_config_byte(chip->pdev, 0xAE, scratch);
}

static int jmicron_probe(struct sdhci_pci_chip *chip)
{
	int ret;
	u16 mmcdev = 0;

	if (chip->pdev->revision == 0) {
		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
			  SDHCI_QUIRK_32BIT_DMA_SIZE |
			  SDHCI_QUIRK_32BIT_ADMA_SIZE |
			  SDHCI_QUIRK_RESET_AFTER_REQUEST |
			  SDHCI_QUIRK_BROKEN_SMALL_PIO;
	}

	/*
	 * JMicron chips can have two interfaces to the same hardware
	 * in order to work around limitations in Microsoft's driver.
	 * We need to make sure we only bind to one of them.
	 *
	 * This code assumes two things:
	 *
	 * 1. The PCI code adds subfunctions in order.
	 *
	 * 2. The MMC interface has a lower subfunction number
	 *    than the SD interface.
	 */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;

	if (mmcdev) {
		struct pci_dev *sd_dev;

		sd_dev = NULL;
		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
						mmcdev, sd_dev)) != NULL) {
			if ((PCI_SLOT(chip->pdev->devfn) ==
				PCI_SLOT(sd_dev->devfn)) &&
			    (chip->pdev->bus == sd_dev->bus))
				break;
		}

		if (sd_dev) {
			pci_dev_put(sd_dev);
			dev_info(&chip->pdev->dev,
				 "Refusing to bind to secondary interface.\n");
			return -ENODEV;
		}
	}

	/*
	 * JMicron chips need a bit of a nudge to enable the power
	 * output pins.
	 */
	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	/* Quirk for unstable RO detection on JM388 chips */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;

	return 0;
}

static void jmicron_enable_mmc(struct sdhci_host *host, int on)
{
	u8 scratch;

	scratch = readb(host->ioaddr + 0xC0);

	if (on)
		scratch |= 0x01;
	else
		scratch &= ~0x01;

	writeb(scratch, host->ioaddr + 0xC0);
}

static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
{
	if (slot->chip->pdev->revision == 0) {
		u16 version;

		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
		version = (version & SDHCI_VENDOR_VER_MASK) >>
			SDHCI_VENDOR_VER_SHIFT;

		/*
		 * Older versions of the chip have lots of nasty glitches
		 * in the ADMA engine. It's best just to avoid it
		 * completely.
		 */
		if (version < 0xAC)
			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
	}

	/* JM388 MMC doesn't support 1.8V while SD supports it */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31 |
			MMC_VDD_165_195; /* allow 1.8V */
		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
	}

	/*
	 * The secondary interface requires a bit set to get the
	 * interrupts.
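	 * The bit in question is bit 0 of the vendor register at offset
	 * 0xC0, toggled by jmicron_enable_mmc().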
1454 */ 1455 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1456 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) 1457 jmicron_enable_mmc(slot->host, 1); 1458 1459 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST; 1460 1461 return 0; 1462 } 1463 1464 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) 1465 { 1466 if (dead) 1467 return; 1468 1469 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1470 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) 1471 jmicron_enable_mmc(slot->host, 0); 1472 } 1473 1474 #ifdef CONFIG_PM_SLEEP 1475 static int jmicron_suspend(struct sdhci_pci_chip *chip) 1476 { 1477 int i, ret; 1478 1479 ret = sdhci_pci_suspend_host(chip); 1480 if (ret) 1481 return ret; 1482 1483 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1484 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 1485 for (i = 0; i < chip->num_slots; i++) 1486 jmicron_enable_mmc(chip->slots[i]->host, 0); 1487 } 1488 1489 return 0; 1490 } 1491 1492 static int jmicron_resume(struct sdhci_pci_chip *chip) 1493 { 1494 int ret, i; 1495 1496 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1497 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 1498 for (i = 0; i < chip->num_slots; i++) 1499 jmicron_enable_mmc(chip->slots[i]->host, 1); 1500 } 1501 1502 ret = jmicron_pmos(chip, 1); 1503 if (ret) { 1504 dev_err(&chip->pdev->dev, "Failure enabling card power\n"); 1505 return ret; 1506 } 1507 1508 return sdhci_pci_resume_host(chip); 1509 } 1510 #endif 1511 1512 static const struct sdhci_pci_fixes sdhci_jmicron = { 1513 .probe = jmicron_probe, 1514 1515 .probe_slot = jmicron_probe_slot, 1516 .remove_slot = jmicron_remove_slot, 1517 1518 #ifdef CONFIG_PM_SLEEP 1519 .suspend = jmicron_suspend, 1520 .resume = jmicron_resume, 1521 #endif 1522 }; 1523 1524 /* SysKonnect CardBus2SDIO extra registers */ 1525 #define SYSKT_CTRL 0x200 1526 #define SYSKT_RDFIFO_STAT 0x204 1527 #define SYSKT_WRFIFO_STAT 0x208 1528 #define SYSKT_POWER_DATA 0x20c 1529 #define SYSKT_POWER_330 0xef 1530 #define SYSKT_POWER_300 0xf8 1531 #define SYSKT_POWER_184 0xcc 1532 #define SYSKT_POWER_CMD 0x20d 1533 #define SYSKT_POWER_START (1 << 7) 1534 #define SYSKT_POWER_STATUS 0x20e 1535 #define SYSKT_POWER_STATUS_OK (1 << 0) 1536 #define SYSKT_BOARD_REV 0x210 1537 #define SYSKT_CHIP_REV 0x211 1538 #define SYSKT_CONF_DATA 0x212 1539 #define SYSKT_CONF_DATA_1V8 (1 << 2) 1540 #define SYSKT_CONF_DATA_2V5 (1 << 1) 1541 #define SYSKT_CONF_DATA_3V3 (1 << 0) 1542 1543 static int syskt_probe(struct sdhci_pci_chip *chip) 1544 { 1545 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { 1546 chip->pdev->class &= ~0x0000FF; 1547 chip->pdev->class |= PCI_SDHCI_IFDMA; 1548 } 1549 return 0; 1550 } 1551 1552 static int syskt_probe_slot(struct sdhci_pci_slot *slot) 1553 { 1554 int tm, ps; 1555 1556 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV); 1557 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV); 1558 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, " 1559 "board rev %d.%d, chip rev %d.%d\n", 1560 board_rev >> 4, board_rev & 0xf, 1561 chip_rev >> 4, chip_rev & 0xf); 1562 if (chip_rev >= 0x20) 1563 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA; 1564 1565 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA); 1566 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD); 1567 udelay(50); 1568 tm = 10; /* Wait max 1 ms */ 1569 do { 1570 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS); 1571 if (ps 
& SYSKT_POWER_STATUS_OK) 1572 break; 1573 udelay(100); 1574 } while (--tm); 1575 if (!tm) { 1576 dev_err(&slot->chip->pdev->dev, 1577 "power regulator never stabilized"); 1578 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD); 1579 return -ENODEV; 1580 } 1581 1582 return 0; 1583 } 1584 1585 static const struct sdhci_pci_fixes sdhci_syskt = { 1586 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER, 1587 .probe = syskt_probe, 1588 .probe_slot = syskt_probe_slot, 1589 }; 1590 1591 static int via_probe(struct sdhci_pci_chip *chip) 1592 { 1593 if (chip->pdev->revision == 0x10) 1594 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; 1595 1596 return 0; 1597 } 1598 1599 static const struct sdhci_pci_fixes sdhci_via = { 1600 .probe = via_probe, 1601 }; 1602 1603 static int rtsx_probe_slot(struct sdhci_pci_slot *slot) 1604 { 1605 slot->host->mmc->caps2 |= MMC_CAP2_HS200; 1606 return 0; 1607 } 1608 1609 static const struct sdhci_pci_fixes sdhci_rtsx = { 1610 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1611 SDHCI_QUIRK2_BROKEN_64_BIT_DMA | 1612 SDHCI_QUIRK2_BROKEN_DDR50, 1613 .probe_slot = rtsx_probe_slot, 1614 }; 1615 1616 /*AMD chipset generation*/ 1617 enum amd_chipset_gen { 1618 AMD_CHIPSET_BEFORE_ML, 1619 AMD_CHIPSET_CZ, 1620 AMD_CHIPSET_NL, 1621 AMD_CHIPSET_UNKNOWN, 1622 }; 1623 1624 /* AMD registers */ 1625 #define AMD_SD_AUTO_PATTERN 0xB8 1626 #define AMD_MSLEEP_DURATION 4 1627 #define AMD_SD_MISC_CONTROL 0xD0 1628 #define AMD_MAX_TUNE_VALUE 0x0B 1629 #define AMD_AUTO_TUNE_SEL 0x10800 1630 #define AMD_FIFO_PTR 0x30 1631 #define AMD_BIT_MASK 0x1F 1632 1633 static void amd_tuning_reset(struct sdhci_host *host) 1634 { 1635 unsigned int val; 1636 1637 val = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1638 val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING; 1639 sdhci_writew(host, val, SDHCI_HOST_CONTROL2); 1640 1641 val = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1642 val &= ~SDHCI_CTRL_EXEC_TUNING; 1643 sdhci_writew(host, val, SDHCI_HOST_CONTROL2); 1644 } 1645 1646 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase) 1647 { 1648 unsigned int val; 1649 1650 pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val); 1651 val &= ~AMD_BIT_MASK; 1652 val |= (AMD_AUTO_TUNE_SEL | (phase << 1)); 1653 pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val); 1654 } 1655 1656 static void amd_enable_manual_tuning(struct pci_dev *pdev) 1657 { 1658 unsigned int val; 1659 1660 pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val); 1661 val |= AMD_FIFO_PTR; 1662 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); 1663 } 1664 1665 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) 1666 { 1667 struct sdhci_pci_slot *slot = sdhci_priv(host); 1668 struct pci_dev *pdev = slot->chip->pdev; 1669 u8 valid_win = 0; 1670 u8 valid_win_max = 0; 1671 u8 valid_win_end = 0; 1672 u8 ctrl, tune_around; 1673 1674 amd_tuning_reset(host); 1675 1676 for (tune_around = 0; tune_around < 12; tune_around++) { 1677 amd_config_tuning_phase(pdev, tune_around); 1678 1679 if (mmc_send_tuning(host->mmc, opcode, NULL)) { 1680 valid_win = 0; 1681 msleep(AMD_MSLEEP_DURATION); 1682 ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA; 1683 sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET); 1684 } else if (++valid_win > valid_win_max) { 1685 valid_win_max = valid_win; 1686 valid_win_end = tune_around; 1687 } 1688 } 1689 1690 if (!valid_win_max) { 1691 dev_err(&pdev->dev, "no tuning point found\n"); 1692 return -EIO; 1693 } 1694 1695 amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2); 1696 1697 
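	/*
	 * The phase programmed above is the midpoint of the longest passing
	 * window; lock it in by switching the controller to manual tuning.
	 */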
amd_enable_manual_tuning(pdev); 1698 1699 host->mmc->retune_period = 0; 1700 1701 return 0; 1702 } 1703 1704 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) 1705 { 1706 struct sdhci_host *host = mmc_priv(mmc); 1707 1708 /* AMD requires custom HS200 tuning */ 1709 if (host->timing == MMC_TIMING_MMC_HS200) 1710 return amd_execute_tuning_hs200(host, opcode); 1711 1712 /* Otherwise perform standard SDHCI tuning */ 1713 return sdhci_execute_tuning(mmc, opcode); 1714 } 1715 1716 static int amd_probe_slot(struct sdhci_pci_slot *slot) 1717 { 1718 struct mmc_host_ops *ops = &slot->host->mmc_host_ops; 1719 1720 ops->execute_tuning = amd_execute_tuning; 1721 1722 return 0; 1723 } 1724 1725 static int amd_probe(struct sdhci_pci_chip *chip) 1726 { 1727 struct pci_dev *smbus_dev; 1728 enum amd_chipset_gen gen; 1729 1730 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 1731 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); 1732 if (smbus_dev) { 1733 gen = AMD_CHIPSET_BEFORE_ML; 1734 } else { 1735 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 1736 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL); 1737 if (smbus_dev) { 1738 if (smbus_dev->revision < 0x51) 1739 gen = AMD_CHIPSET_CZ; 1740 else 1741 gen = AMD_CHIPSET_NL; 1742 } else { 1743 gen = AMD_CHIPSET_UNKNOWN; 1744 } 1745 } 1746 1747 pci_dev_put(smbus_dev); 1748 1749 if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ) 1750 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; 1751 1752 return 0; 1753 } 1754 1755 static u32 sdhci_read_present_state(struct sdhci_host *host) 1756 { 1757 return sdhci_readl(host, SDHCI_PRESENT_STATE); 1758 } 1759 1760 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) 1761 { 1762 struct sdhci_pci_slot *slot = sdhci_priv(host); 1763 struct pci_dev *pdev = slot->chip->pdev; 1764 u32 present_state; 1765 1766 /* 1767 * SDHC 0x7906 requires a hard reset to clear all internal state. 1768 * Otherwise it can get into a bad state where the DATA lines are always 1769 * read as zeros. 1770 */ 1771 if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) { 1772 pci_clear_master(pdev); 1773 1774 pci_save_state(pdev); 1775 1776 pci_set_power_state(pdev, PCI_D3cold); 1777 pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc), 1778 pdev->current_state); 1779 pci_set_power_state(pdev, PCI_D0); 1780 1781 pci_restore_state(pdev); 1782 1783 /* 1784 * SDHCI_RESET_ALL says the card detect logic should not be 1785 * reset, but since we need to reset the entire controller 1786 * we should wait until the card detect logic has stabilized. 1787 * 1788 * This normally takes about 40ms. 
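	 * The poll below checks every 10ms and times out after 100ms.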
1789 */ 1790 readx_poll_timeout( 1791 sdhci_read_present_state, 1792 host, 1793 present_state, 1794 present_state & SDHCI_CD_STABLE, 1795 10000, 1796 100000 1797 ); 1798 } 1799 1800 return sdhci_reset(host, mask); 1801 } 1802 1803 static const struct sdhci_ops amd_sdhci_pci_ops = { 1804 .set_clock = sdhci_set_clock, 1805 .enable_dma = sdhci_pci_enable_dma, 1806 .set_bus_width = sdhci_set_bus_width, 1807 .reset = amd_sdhci_reset, 1808 .set_uhs_signaling = sdhci_set_uhs_signaling, 1809 }; 1810 1811 static const struct sdhci_pci_fixes sdhci_amd = { 1812 .probe = amd_probe, 1813 .ops = &amd_sdhci_pci_ops, 1814 .probe_slot = amd_probe_slot, 1815 }; 1816 1817 static const struct pci_device_id pci_ids[] = { 1818 SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh), 1819 SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc), 1820 SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc), 1821 SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc), 1822 SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712), 1823 SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712), 1824 SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714), 1825 SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714), 1826 SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe), 1827 SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron), 1828 SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron), 1829 SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron), 1830 SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron), 1831 SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt), 1832 SDHCI_PCI_DEVICE(VIA, 95D0, via), 1833 SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx), 1834 SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk), 1835 SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0), 1836 SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2), 1837 SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2), 1838 SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd), 1839 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio), 1840 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio), 1841 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc), 1842 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc), 1843 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio), 1844 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio), 1845 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc), 1846 SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio), 1847 SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio), 1848 SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd), 1849 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc), 1850 SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc), 1851 SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio), 1852 SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd), 1853 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd), 1854 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio), 1855 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio), 1856 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc), 1857 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc), 1858 SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc), 1859 SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc), 1860 SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio), 1861 SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd), 1862 SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc), 1863 SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc), 1864 SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc), 1865 SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio), 1866 SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd), 1867 SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc), 1868 SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio), 1869 SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd), 1870 SDHCI_PCI_DEVICE(INTEL, APL_EMMC, 
intel_byt_emmc), 1871 SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio), 1872 SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd), 1873 SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc), 1874 SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio), 1875 SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd), 1876 SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc), 1877 SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd), 1878 SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd), 1879 SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc), 1880 SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd), 1881 SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc), 1882 SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd), 1883 SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc), 1884 SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd), 1885 SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd), 1886 SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc), 1887 SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd), 1888 SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc), 1889 SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd), 1890 SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc), 1891 SDHCI_PCI_DEVICE(O2, 8120, o2), 1892 SDHCI_PCI_DEVICE(O2, 8220, o2), 1893 SDHCI_PCI_DEVICE(O2, 8221, o2), 1894 SDHCI_PCI_DEVICE(O2, 8320, o2), 1895 SDHCI_PCI_DEVICE(O2, 8321, o2), 1896 SDHCI_PCI_DEVICE(O2, FUJIN2, o2), 1897 SDHCI_PCI_DEVICE(O2, SDS0, o2), 1898 SDHCI_PCI_DEVICE(O2, SDS1, o2), 1899 SDHCI_PCI_DEVICE(O2, SEABIRD0, o2), 1900 SDHCI_PCI_DEVICE(O2, SEABIRD1, o2), 1901 SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan), 1902 SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps), 1903 SDHCI_PCI_DEVICE(GLI, 9750, gl9750), 1904 SDHCI_PCI_DEVICE(GLI, 9755, gl9755), 1905 SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e), 1906 SDHCI_PCI_DEVICE(GLI, 9767, gl9767), 1907 SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd), 1908 /* Generic SD host controller */ 1909 {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)}, 1910 { /* end: all zeroes */ }, 1911 }; 1912 1913 MODULE_DEVICE_TABLE(pci, pci_ids); 1914 1915 /*****************************************************************************\ 1916 * * 1917 * SDHCI core callbacks * 1918 * * 1919 \*****************************************************************************/ 1920 1921 int sdhci_pci_enable_dma(struct sdhci_host *host) 1922 { 1923 struct sdhci_pci_slot *slot; 1924 struct pci_dev *pdev; 1925 1926 slot = sdhci_priv(host); 1927 pdev = slot->chip->pdev; 1928 1929 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && 1930 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && 1931 (host->flags & SDHCI_USE_SDMA)) { 1932 dev_warn(&pdev->dev, "Will use DMA mode even though HW " 1933 "doesn't fully claim to support it.\n"); 1934 } 1935 1936 pci_set_master(pdev); 1937 1938 return 0; 1939 } 1940 1941 static void sdhci_pci_hw_reset(struct sdhci_host *host) 1942 { 1943 struct sdhci_pci_slot *slot = sdhci_priv(host); 1944 1945 if (slot->hw_reset) 1946 slot->hw_reset(host); 1947 } 1948 1949 static const struct sdhci_ops sdhci_pci_ops = { 1950 .set_clock = sdhci_set_clock, 1951 .enable_dma = sdhci_pci_enable_dma, 1952 .set_bus_width = sdhci_set_bus_width, 1953 .reset = sdhci_reset, 1954 .set_uhs_signaling = sdhci_set_uhs_signaling, 1955 .hw_reset = sdhci_pci_hw_reset, 1956 }; 1957 1958 /*****************************************************************************\ 1959 * * 1960 * Suspend/resume * 1961 * * 1962 \*****************************************************************************/ 1963 1964 #ifdef CONFIG_PM_SLEEP 1965 static int sdhci_pci_suspend(struct device *dev) 1966 { 
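	/*
	 * Defer to a chip-specific ->suspend fixup when the quirk table
	 * provides one; otherwise suspend all hosts generically.
	 */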
1967 struct sdhci_pci_chip *chip = dev_get_drvdata(dev); 1968 1969 if (!chip) 1970 return 0; 1971 1972 if (chip->fixes && chip->fixes->suspend) 1973 return chip->fixes->suspend(chip); 1974 1975 return sdhci_pci_suspend_host(chip); 1976 } 1977 1978 static int sdhci_pci_resume(struct device *dev) 1979 { 1980 struct sdhci_pci_chip *chip = dev_get_drvdata(dev); 1981 1982 if (!chip) 1983 return 0; 1984 1985 if (chip->fixes && chip->fixes->resume) 1986 return chip->fixes->resume(chip); 1987 1988 return sdhci_pci_resume_host(chip); 1989 } 1990 #endif 1991 1992 #ifdef CONFIG_PM 1993 static int sdhci_pci_runtime_suspend(struct device *dev) 1994 { 1995 struct sdhci_pci_chip *chip = dev_get_drvdata(dev); 1996 1997 if (!chip) 1998 return 0; 1999 2000 if (chip->fixes && chip->fixes->runtime_suspend) 2001 return chip->fixes->runtime_suspend(chip); 2002 2003 return sdhci_pci_runtime_suspend_host(chip); 2004 } 2005 2006 static int sdhci_pci_runtime_resume(struct device *dev) 2007 { 2008 struct sdhci_pci_chip *chip = dev_get_drvdata(dev); 2009 2010 if (!chip) 2011 return 0; 2012 2013 if (chip->fixes && chip->fixes->runtime_resume) 2014 return chip->fixes->runtime_resume(chip); 2015 2016 return sdhci_pci_runtime_resume_host(chip); 2017 } 2018 #endif 2019 2020 static const struct dev_pm_ops sdhci_pci_pm_ops = { 2021 SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume) 2022 SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend, 2023 sdhci_pci_runtime_resume, NULL) 2024 }; 2025 2026 /*****************************************************************************\ 2027 * * 2028 * Device probing/removal * 2029 * * 2030 \*****************************************************************************/ 2031 2032 static struct sdhci_pci_slot *sdhci_pci_probe_slot( 2033 struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, 2034 int slotno) 2035 { 2036 struct sdhci_pci_slot *slot; 2037 struct sdhci_host *host; 2038 int ret, bar = first_bar + slotno; 2039 size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0; 2040 2041 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 2042 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); 2043 return ERR_PTR(-ENODEV); 2044 } 2045 2046 if (pci_resource_len(pdev, bar) < 0x100) { 2047 dev_err(&pdev->dev, "Invalid iomem size. You may " 2048 "experience problems.\n"); 2049 } 2050 2051 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { 2052 dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); 2053 return ERR_PTR(-ENODEV); 2054 } 2055 2056 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { 2057 dev_err(&pdev->dev, "Unknown interface. Aborting.\n"); 2058 return ERR_PTR(-ENODEV); 2059 } 2060 2061 host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size); 2062 if (IS_ERR(host)) { 2063 dev_err(&pdev->dev, "cannot allocate host\n"); 2064 return ERR_CAST(host); 2065 } 2066 2067 slot = sdhci_priv(host); 2068 2069 slot->chip = chip; 2070 slot->host = host; 2071 slot->cd_idx = -1; 2072 2073 host->hw_name = "PCI"; 2074 host->ops = chip->fixes && chip->fixes->ops ? 
static struct sdhci_pci_slot *sdhci_pci_probe_slot(
	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
	int slotno)
{
	struct sdhci_pci_slot *slot;
	struct sdhci_host *host;
	int ret, bar = first_bar + slotno;
	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
		return ERR_PTR(-ENODEV);
	}

	if (pci_resource_len(pdev, bar) < 0x100) {
		dev_err(&pdev->dev, "Invalid iomem size. You may "
			"experience problems.\n");
	}

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
	if (IS_ERR(host)) {
		dev_err(&pdev->dev, "cannot allocate host\n");
		return ERR_CAST(host);
	}

	slot = sdhci_priv(host);

	slot->chip = chip;
	slot->host = host;
	slot->cd_idx = -1;

	host->hw_name = "PCI";
	host->ops = chip->fixes && chip->fixes->ops ?
		    chip->fixes->ops :
		    &sdhci_pci_ops;
	host->quirks = chip->quirks;
	host->quirks2 = chip->quirks2;

	host->irq = pdev->irq;

	ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
	if (ret) {
		dev_err(&pdev->dev, "cannot request region\n");
		goto cleanup;
	}

	host->ioaddr = pcim_iomap_table(pdev)[bar];

	if (chip->fixes && chip->fixes->probe_slot) {
		ret = chip->fixes->probe_slot(slot);
		if (ret)
			goto cleanup;
	}

	host->mmc->pm_caps = MMC_PM_KEEP_POWER;
	host->mmc->slotno = slotno;
	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;

	if (device_can_wakeup(&pdev->dev))
		host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (host->mmc->caps & MMC_CAP_CD_WAKE)
		device_init_wakeup(&pdev->dev, true);

	if (slot->cd_idx >= 0) {
		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
					   slot->cd_override_level, 0);
		if (ret && ret != -EPROBE_DEFER)
			ret = mmc_gpiod_request_cd(host->mmc, NULL,
						   slot->cd_idx,
						   slot->cd_override_level,
						   0);
		if (ret == -EPROBE_DEFER)
			goto remove;

		if (ret) {
			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
			slot->cd_idx = -1;
		}
	}

	if (chip->fixes && chip->fixes->add_host)
		ret = chip->fixes->add_host(slot);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto remove;

	/*
	 * Check if the chip needs a separate GPIO for card detect to wake up
	 * from runtime suspend.  If it is not there, don't allow runtime PM.
	 */
	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
		chip->allow_runtime_pm = false;

	return slot;

remove:
	if (chip->fixes && chip->fixes->remove_slot)
		chip->fixes->remove_slot(slot, 0);

cleanup:
	sdhci_free_host(host);

	return ERR_PTR(ret);
}

static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
	int dead;
	u32 scratch;

	dead = 0;
	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
	if (scratch == (u32)-1)
		dead = 1;

	sdhci_remove_host(slot->host, dead);

	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
		slot->chip->fixes->remove_slot(slot, dead);

	sdhci_free_host(slot->host);
}

static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
	pm_suspend_ignore_children(dev, 1);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	/* Stay active until mmc core scans for a card */
	pm_runtime_put_noidle(dev);
}

static void sdhci_pci_runtime_pm_forbid(struct device *dev)
{
	pm_runtime_forbid(dev);
	pm_runtime_get_noresume(dev);
}
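/*
 * The number of slots and the first BAR to use are read from the standard
 * PCI_SLOT_INFO register.  Chip-specific fixes selected via the PCI ID table
 * may adjust the quirks, the slot count and the runtime PM policy before the
 * individual slots are probed.
 */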
static int sdhci_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;

	u8 slots, first_bar;
	int ret, i;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);

	BUG_ON(slots > MAX_SLOTS);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->pdev = pdev;
	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
	if (chip->fixes) {
		chip->quirks = chip->fixes->quirks;
		chip->quirks2 = chip->fixes->quirks2;
		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
	}
	chip->num_slots = slots;
	chip->pm_retune = true;
	chip->rpm_retune = true;

	pci_set_drvdata(pdev, chip);

	if (chip->fixes && chip->fixes->probe) {
		ret = chip->fixes->probe(chip);
		if (ret)
			return ret;
	}

	slots = chip->num_slots;	/* Quirk may have changed this */

	for (i = 0; i < slots; i++) {
		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
		if (IS_ERR(slot)) {
			for (i--; i >= 0; i--)
				sdhci_pci_remove_slot(chip->slots[i]);
			return PTR_ERR(slot);
		}

		chip->slots[i] = slot;
	}

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_allow(&pdev->dev);

	return 0;
}

static void sdhci_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_forbid(&pdev->dev);

	for (i = 0; i < chip->num_slots; i++)
		sdhci_pci_remove_slot(chip->slots[i]);
}

static struct pci_driver sdhci_driver = {
	.name		= "sdhci-pci",
	.id_table	= pci_ids,
	.probe		= sdhci_pci_probe,
	.remove		= sdhci_pci_remove,
	.driver		= {
		.pm = &sdhci_pci_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_pci_driver(sdhci_driver);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
MODULE_LICENSE("GPL");