1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface 3 * 4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 5 * 6 * Thanks to the following companies for their support: 7 * 8 * - JMicron (hardware and technical support) 9 */ 10 11 #include <linux/bitfield.h> 12 #include <linux/string.h> 13 #include <linux/delay.h> 14 #include <linux/highmem.h> 15 #include <linux/module.h> 16 #include <linux/pci.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/slab.h> 19 #include <linux/device.h> 20 #include <linux/scatterlist.h> 21 #include <linux/io.h> 22 #include <linux/iopoll.h> 23 #include <linux/gpio.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/pm_qos.h> 26 #include <linux/debugfs.h> 27 #include <linux/acpi.h> 28 #include <linux/dmi.h> 29 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/mmc.h> 32 #include <linux/mmc/slot-gpio.h> 33 34 #ifdef CONFIG_X86 35 #include <asm/iosf_mbi.h> 36 #endif 37 38 #include "cqhci.h" 39 40 #include "sdhci.h" 41 #include "sdhci-cqhci.h" 42 #include "sdhci-pci.h" 43 44 static void sdhci_pci_hw_reset(struct sdhci_host *host); 45 46 #ifdef CONFIG_PM_SLEEP 47 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip) 48 { 49 mmc_pm_flag_t pm_flags = 0; 50 bool cap_cd_wake = false; 51 int i; 52 53 for (i = 0; i < chip->num_slots; i++) { 54 struct sdhci_pci_slot *slot = chip->slots[i]; 55 56 if (slot) { 57 pm_flags |= slot->host->mmc->pm_flags; 58 if (slot->host->mmc->caps & MMC_CAP_CD_WAKE) 59 cap_cd_wake = true; 60 } 61 } 62 63 if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ)) 64 return device_wakeup_enable(&chip->pdev->dev); 65 else if (!cap_cd_wake) 66 return device_wakeup_disable(&chip->pdev->dev); 67 68 return 0; 69 } 70 71 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip) 72 { 73 int i, ret; 74 75 sdhci_pci_init_wakeup(chip); 76 77 for (i = 0; i < chip->num_slots; i++) { 78 struct sdhci_pci_slot *slot = chip->slots[i]; 79 struct sdhci_host *host; 80 81 if (!slot) 82 continue; 83 84 host = slot->host; 85 86 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3) 87 mmc_retune_needed(host->mmc); 88 89 ret = sdhci_suspend_host(host); 90 if (ret) 91 goto err_pci_suspend; 92 93 if (device_may_wakeup(&chip->pdev->dev)) 94 mmc_gpio_set_cd_wake(host->mmc, true); 95 } 96 97 return 0; 98 99 err_pci_suspend: 100 while (--i >= 0) 101 sdhci_resume_host(chip->slots[i]->host); 102 return ret; 103 } 104 105 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip) 106 { 107 struct sdhci_pci_slot *slot; 108 int i, ret; 109 110 for (i = 0; i < chip->num_slots; i++) { 111 slot = chip->slots[i]; 112 if (!slot) 113 continue; 114 115 ret = sdhci_resume_host(slot->host); 116 if (ret) 117 return ret; 118 119 mmc_gpio_set_cd_wake(slot->host->mmc, false); 120 } 121 122 return 0; 123 } 124 125 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip) 126 { 127 int ret; 128 129 ret = cqhci_suspend(chip->slots[0]->host->mmc); 130 if (ret) 131 return ret; 132 133 return sdhci_pci_suspend_host(chip); 134 } 135 136 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip) 137 { 138 int ret; 139 140 ret = sdhci_pci_resume_host(chip); 141 if (ret) 142 return ret; 143 144 return cqhci_resume(chip->slots[0]->host->mmc); 145 } 146 #endif 147 148 #ifdef CONFIG_PM 149 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip) 150 { 151 struct sdhci_pci_slot *slot; 152 struct sdhci_host *host; 153 int i, ret; 154 155 for (i = 0; i < chip->num_slots; i++) 
{ 156 slot = chip->slots[i]; 157 if (!slot) 158 continue; 159 160 host = slot->host; 161 162 ret = sdhci_runtime_suspend_host(host); 163 if (ret) 164 goto err_pci_runtime_suspend; 165 166 if (chip->rpm_retune && 167 host->tuning_mode != SDHCI_TUNING_MODE_3) 168 mmc_retune_needed(host->mmc); 169 } 170 171 return 0; 172 173 err_pci_runtime_suspend: 174 while (--i >= 0) 175 sdhci_runtime_resume_host(chip->slots[i]->host, 0); 176 return ret; 177 } 178 179 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip) 180 { 181 struct sdhci_pci_slot *slot; 182 int i, ret; 183 184 for (i = 0; i < chip->num_slots; i++) { 185 slot = chip->slots[i]; 186 if (!slot) 187 continue; 188 189 ret = sdhci_runtime_resume_host(slot->host, 0); 190 if (ret) 191 return ret; 192 } 193 194 return 0; 195 } 196 197 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip) 198 { 199 int ret; 200 201 ret = cqhci_suspend(chip->slots[0]->host->mmc); 202 if (ret) 203 return ret; 204 205 return sdhci_pci_runtime_suspend_host(chip); 206 } 207 208 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip) 209 { 210 int ret; 211 212 ret = sdhci_pci_runtime_resume_host(chip); 213 if (ret) 214 return ret; 215 216 return cqhci_resume(chip->slots[0]->host->mmc); 217 } 218 #endif 219 220 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask) 221 { 222 int cmd_error = 0; 223 int data_error = 0; 224 225 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) 226 return intmask; 227 228 cqhci_irq(host->mmc, intmask, cmd_error, data_error); 229 230 return 0; 231 } 232 233 static void sdhci_pci_dumpregs(struct mmc_host *mmc) 234 { 235 sdhci_dumpregs(mmc_priv(mmc)); 236 } 237 238 /*****************************************************************************\ 239 * * 240 * Hardware specific quirk handling * 241 * * 242 \*****************************************************************************/ 243 244 static int ricoh_probe(struct sdhci_pci_chip *chip) 245 { 246 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG || 247 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY) 248 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; 249 return 0; 250 } 251 252 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot) 253 { 254 slot->host->caps = 255 FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) | 256 FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) | 257 SDHCI_TIMEOUT_CLK_UNIT | 258 SDHCI_CAN_VDD_330 | 259 SDHCI_CAN_DO_HISPD | 260 SDHCI_CAN_DO_SDMA; 261 return 0; 262 } 263 264 #ifdef CONFIG_PM_SLEEP 265 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip) 266 { 267 /* Apply a delay to allow controller to settle */ 268 /* Otherwise it becomes confused if card state changed 269 during suspend */ 270 msleep(500); 271 return sdhci_pci_resume_host(chip); 272 } 273 #endif 274 275 static const struct sdhci_pci_fixes sdhci_ricoh = { 276 .probe = ricoh_probe, 277 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 278 SDHCI_QUIRK_FORCE_DMA | 279 SDHCI_QUIRK_CLOCK_BEFORE_RESET, 280 }; 281 282 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = { 283 .probe_slot = ricoh_mmc_probe_slot, 284 #ifdef CONFIG_PM_SLEEP 285 .resume = ricoh_mmc_resume, 286 #endif 287 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 288 SDHCI_QUIRK_CLOCK_BEFORE_RESET | 289 SDHCI_QUIRK_NO_CARD_NO_RESET | 290 SDHCI_QUIRK_MISSING_CAPS 291 }; 292 293 static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 294 { 295 struct sdhci_host *host = mmc_priv(mmc); 296 297 sdhci_set_ios(mmc, ios); 298 299 /* 300 * Some (ENE) controllers misbehave on some ios 
operations, 301 * signalling timeout and CRC errors even on CMD0. Resetting 302 * it on each ios seems to solve the problem. 303 */ 304 if (!(host->flags & SDHCI_DEVICE_DEAD)) 305 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 306 } 307 308 static int ene_714_probe_slot(struct sdhci_pci_slot *slot) 309 { 310 slot->host->mmc_host_ops.set_ios = ene_714_set_ios; 311 return 0; 312 } 313 314 static const struct sdhci_pci_fixes sdhci_ene_712 = { 315 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | 316 SDHCI_QUIRK_BROKEN_DMA, 317 }; 318 319 static const struct sdhci_pci_fixes sdhci_ene_714 = { 320 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE | 321 SDHCI_QUIRK_BROKEN_DMA, 322 .probe_slot = ene_714_probe_slot, 323 }; 324 325 static const struct sdhci_pci_fixes sdhci_cafe = { 326 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | 327 SDHCI_QUIRK_NO_BUSY_IRQ | 328 SDHCI_QUIRK_BROKEN_CARD_DETECTION | 329 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, 330 }; 331 332 static const struct sdhci_pci_fixes sdhci_intel_qrk = { 333 .quirks = SDHCI_QUIRK_NO_HISPD_BIT, 334 }; 335 336 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) 337 { 338 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; 339 return 0; 340 } 341 342 /* 343 * ADMA operation is disabled for Moorestown platform due to 344 * hardware bugs. 345 */ 346 static int mrst_hc_probe(struct sdhci_pci_chip *chip) 347 { 348 /* 349 * slots number is fixed here for MRST as SDIO3/5 are never used and 350 * have hardware bugs. 351 */ 352 chip->num_slots = 1; 353 return 0; 354 } 355 356 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) 357 { 358 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; 359 return 0; 360 } 361 362 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) 363 { 364 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; 365 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC; 366 return 0; 367 } 368 369 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) 370 { 371 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; 372 return 0; 373 } 374 375 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { 376 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, 377 .probe_slot = mrst_hc_probe_slot, 378 }; 379 380 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { 381 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, 382 .probe = mrst_hc_probe, 383 }; 384 385 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { 386 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 387 .allow_runtime_pm = true, 388 .own_cd_for_runtime_pm = true, 389 }; 390 391 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { 392 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 393 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, 394 .allow_runtime_pm = true, 395 .probe_slot = mfd_sdio_probe_slot, 396 }; 397 398 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { 399 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 400 .allow_runtime_pm = true, 401 .probe_slot = mfd_emmc_probe_slot, 402 }; 403 404 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { 405 .quirks = SDHCI_QUIRK_BROKEN_ADMA, 406 .probe_slot = pch_hc_probe_slot, 407 }; 408 409 #ifdef CONFIG_X86 410 411 #define BYT_IOSF_SCCEP 0x63 412 #define BYT_IOSF_OCP_NETCTRL0 0x1078 413 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) 414 415 static void byt_ocp_setting(struct pci_dev *pdev) 416 { 417 u32 val = 0; 418 419 if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC && 420 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO && 421 pdev->device != 
PCI_DEVICE_ID_INTEL_BYT_SD && 422 pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2) 423 return; 424 425 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0, 426 &val)) { 427 dev_err(&pdev->dev, "%s read error\n", __func__); 428 return; 429 } 430 431 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) 432 return; 433 434 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; 435 436 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0, 437 val)) { 438 dev_err(&pdev->dev, "%s write error\n", __func__); 439 return; 440 } 441 442 dev_dbg(&pdev->dev, "%s completed\n", __func__); 443 } 444 445 #else 446 447 static inline void byt_ocp_setting(struct pci_dev *pdev) 448 { 449 } 450 451 #endif 452 453 enum { 454 INTEL_DSM_FNS = 0, 455 INTEL_DSM_V18_SWITCH = 3, 456 INTEL_DSM_V33_SWITCH = 4, 457 INTEL_DSM_DRV_STRENGTH = 9, 458 INTEL_DSM_D3_RETUNE = 10, 459 }; 460 461 struct intel_host { 462 u32 dsm_fns; 463 int drv_strength; 464 bool d3_retune; 465 bool rpm_retune_ok; 466 bool needs_pwr_off; 467 u32 glk_rx_ctrl1; 468 u32 glk_tun_val; 469 u32 active_ltr; 470 u32 idle_ltr; 471 }; 472 473 static const guid_t intel_dsm_guid = 474 GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F, 475 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61); 476 477 static int __intel_dsm(struct intel_host *intel_host, struct device *dev, 478 unsigned int fn, u32 *result) 479 { 480 union acpi_object *obj; 481 int err = 0; 482 size_t len; 483 484 obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); 485 if (!obj) 486 return -EOPNOTSUPP; 487 488 if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) { 489 err = -EINVAL; 490 goto out; 491 } 492 493 len = min_t(size_t, obj->buffer.length, 4); 494 495 *result = 0; 496 memcpy(result, obj->buffer.pointer, len); 497 out: 498 ACPI_FREE(obj); 499 500 return err; 501 } 502 503 static int intel_dsm(struct intel_host *intel_host, struct device *dev, 504 unsigned int fn, u32 *result) 505 { 506 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) 507 return -EOPNOTSUPP; 508 509 return __intel_dsm(intel_host, dev, fn, result); 510 } 511 512 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, 513 struct mmc_host *mmc) 514 { 515 int err; 516 u32 val; 517 518 intel_host->d3_retune = true; 519 520 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); 521 if (err) { 522 pr_debug("%s: DSM not supported, error %d\n", 523 mmc_hostname(mmc), err); 524 return; 525 } 526 527 pr_debug("%s: DSM function mask %#x\n", 528 mmc_hostname(mmc), intel_host->dsm_fns); 529 530 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val); 531 intel_host->drv_strength = err ? 0 : val; 532 533 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val); 534 intel_host->d3_retune = err ? 
true : !!val; 535 } 536 537 static void sdhci_pci_int_hw_reset(struct sdhci_host *host) 538 { 539 u8 reg; 540 541 reg = sdhci_readb(host, SDHCI_POWER_CONTROL); 542 reg |= 0x10; 543 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 544 /* For eMMC, minimum is 1us but give it 9us for good measure */ 545 udelay(9); 546 reg &= ~0x10; 547 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 548 /* For eMMC, minimum is 200us but give it 300us for good measure */ 549 usleep_range(300, 1000); 550 } 551 552 static int intel_select_drive_strength(struct mmc_card *card, 553 unsigned int max_dtr, int host_drv, 554 int card_drv, int *drv_type) 555 { 556 struct sdhci_host *host = mmc_priv(card->host); 557 struct sdhci_pci_slot *slot = sdhci_priv(host); 558 struct intel_host *intel_host = sdhci_pci_priv(slot); 559 560 if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv)) 561 return 0; 562 563 return intel_host->drv_strength; 564 } 565 566 static int bxt_get_cd(struct mmc_host *mmc) 567 { 568 int gpio_cd = mmc_gpio_get_cd(mmc); 569 570 if (!gpio_cd) 571 return 0; 572 573 return sdhci_get_cd_nogpio(mmc); 574 } 575 576 static int mrfld_get_cd(struct mmc_host *mmc) 577 { 578 return sdhci_get_cd_nogpio(mmc); 579 } 580 581 #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20 582 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100 583 584 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, 585 unsigned short vdd) 586 { 587 struct sdhci_pci_slot *slot = sdhci_priv(host); 588 struct intel_host *intel_host = sdhci_pci_priv(slot); 589 int cntr; 590 u8 reg; 591 592 /* 593 * Bus power may control card power, but a full reset still may not 594 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can. 595 * That might be needed to initialize correctly, if the card was left 596 * powered on previously. 597 */ 598 if (intel_host->needs_pwr_off) { 599 intel_host->needs_pwr_off = false; 600 if (mode != MMC_POWER_OFF) { 601 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 602 usleep_range(10000, 12500); 603 } 604 } 605 606 sdhci_set_power(host, mode, vdd); 607 608 if (mode == MMC_POWER_OFF) 609 return; 610 611 /* 612 * Bus power might not enable after D3 -> D0 transition due to the 613 * present state not yet having propagated. Retry for up to 2ms. 
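	 * (That is SDHCI_INTEL_PWR_TIMEOUT_CNT retries of
	 * SDHCI_INTEL_PWR_TIMEOUT_UDELAY microseconds each, i.e. 20 x 100 us.)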
614 */ 615 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) { 616 reg = sdhci_readb(host, SDHCI_POWER_CONTROL); 617 if (reg & SDHCI_POWER_ON) 618 break; 619 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY); 620 reg |= SDHCI_POWER_ON; 621 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 622 } 623 } 624 625 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host, 626 unsigned int timing) 627 { 628 /* Set UHS timing to SDR25 for High Speed mode */ 629 if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS) 630 timing = MMC_TIMING_UHS_SDR25; 631 sdhci_set_uhs_signaling(host, timing); 632 } 633 634 #define INTEL_HS400_ES_REG 0x78 635 #define INTEL_HS400_ES_BIT BIT(0) 636 637 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc, 638 struct mmc_ios *ios) 639 { 640 struct sdhci_host *host = mmc_priv(mmc); 641 u32 val; 642 643 val = sdhci_readl(host, INTEL_HS400_ES_REG); 644 if (ios->enhanced_strobe) 645 val |= INTEL_HS400_ES_BIT; 646 else 647 val &= ~INTEL_HS400_ES_BIT; 648 sdhci_writel(host, val, INTEL_HS400_ES_REG); 649 } 650 651 static int intel_start_signal_voltage_switch(struct mmc_host *mmc, 652 struct mmc_ios *ios) 653 { 654 struct device *dev = mmc_dev(mmc); 655 struct sdhci_host *host = mmc_priv(mmc); 656 struct sdhci_pci_slot *slot = sdhci_priv(host); 657 struct intel_host *intel_host = sdhci_pci_priv(slot); 658 unsigned int fn; 659 u32 result = 0; 660 int err; 661 662 err = sdhci_start_signal_voltage_switch(mmc, ios); 663 if (err) 664 return err; 665 666 switch (ios->signal_voltage) { 667 case MMC_SIGNAL_VOLTAGE_330: 668 fn = INTEL_DSM_V33_SWITCH; 669 break; 670 case MMC_SIGNAL_VOLTAGE_180: 671 fn = INTEL_DSM_V18_SWITCH; 672 break; 673 default: 674 return 0; 675 } 676 677 err = intel_dsm(intel_host, dev, fn, &result); 678 pr_debug("%s: %s DSM fn %u error %d result %u\n", 679 mmc_hostname(mmc), __func__, fn, err, result); 680 681 return 0; 682 } 683 684 static const struct sdhci_ops sdhci_intel_byt_ops = { 685 .set_clock = sdhci_set_clock, 686 .set_power = sdhci_intel_set_power, 687 .enable_dma = sdhci_pci_enable_dma, 688 .set_bus_width = sdhci_set_bus_width, 689 .reset = sdhci_reset, 690 .set_uhs_signaling = sdhci_intel_set_uhs_signaling, 691 .hw_reset = sdhci_pci_hw_reset, 692 }; 693 694 static const struct sdhci_ops sdhci_intel_glk_ops = { 695 .set_clock = sdhci_set_clock, 696 .set_power = sdhci_intel_set_power, 697 .enable_dma = sdhci_pci_enable_dma, 698 .set_bus_width = sdhci_set_bus_width, 699 .reset = sdhci_and_cqhci_reset, 700 .set_uhs_signaling = sdhci_intel_set_uhs_signaling, 701 .hw_reset = sdhci_pci_hw_reset, 702 .irq = sdhci_cqhci_irq, 703 }; 704 705 static void byt_read_dsm(struct sdhci_pci_slot *slot) 706 { 707 struct intel_host *intel_host = sdhci_pci_priv(slot); 708 struct device *dev = &slot->chip->pdev->dev; 709 struct mmc_host *mmc = slot->host->mmc; 710 711 intel_dsm_init(intel_host, dev, mmc); 712 slot->chip->rpm_retune = intel_host->d3_retune; 713 } 714 715 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) 716 { 717 int err = sdhci_execute_tuning(mmc, opcode); 718 struct sdhci_host *host = mmc_priv(mmc); 719 720 if (err) 721 return err; 722 723 /* 724 * Tuning can leave the IP in an active state (Buffer Read Enable bit 725 * set) which prevents the entry to low power states (i.e. S0i3). Data 726 * reset will clear it. 
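	 * Only the DAT circuit is reset (SDHCI_RESET_DATA below), so the
	 * tuning result itself should be left intact.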
	 */
	sdhci_reset(host, SDHCI_RESET_DATA);

	return 0;
}

#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

static void intel_cache_ltr(struct sdhci_pci_slot *slot)
{
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct sdhci_host *host = slot->host;

	intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
	intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
	struct sdhci_pci_slot *slot = chip->slots[0];
	struct intel_host *intel_host = sdhci_pci_priv(slot);
	struct sdhci_host *host = slot->host;
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program the latency tolerance (LTR) according to what has been
	 * asked by the PM QoS layer, or disable it in case we were passed
	 * a negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(host->ioaddr + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == intel_host->active_ltr)
		goto out;

	writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
	writel(ltr, host->ioaddr + INTEL_IDLELTR);

	/* Cache the values into the lpss structure */
	intel_cache_ltr(slot);
out:
	pm_runtime_put_autosuspend(dev);
}

static bool intel_use_ltr(struct sdhci_pci_chip *chip)
{
	switch (chip->pdev->device) {
	case PCI_DEVICE_ID_INTEL_BYT_EMMC:
	case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
	case PCI_DEVICE_ID_INTEL_BYT_SD:
	case PCI_DEVICE_ID_INTEL_BSW_EMMC:
	case PCI_DEVICE_ID_INTEL_BSW_SDIO:
	case PCI_DEVICE_ID_INTEL_BSW_SD:
		return false;
	default:
		return true;
	}
}

static void intel_ltr_expose(struct sdhci_pci_chip *chip)
{
	struct device *dev = &chip->pdev->dev;

	if (!intel_use_ltr(chip))
		return;

	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct sdhci_pci_chip *chip)
{
	struct device *dev = &chip->pdev->dev;

	if (!intel_use_ltr(chip))
		return;

	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

static void byt_probe_slot(struct sdhci_pci_slot *slot)
{
	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
	struct device *dev = &slot->chip->pdev->dev;
	struct mmc_host *mmc = slot->host->mmc;

	byt_read_dsm(slot);

	byt_ocp_setting(slot->chip->pdev);

	ops->execute_tuning = intel_execute_tuning;
	ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;

	device_property_read_u32(dev, "max-frequency", &mmc->f_max);

	if (!mmc->slotno) {
		slot->chip->slots[mmc->slotno] = slot;
		intel_ltr_expose(slot->chip);
	}
}

static void byt_add_debugfs(struct
sdhci_pci_slot *slot) 857 { 858 struct intel_host *intel_host = sdhci_pci_priv(slot); 859 struct mmc_host *mmc = slot->host->mmc; 860 struct dentry *dir = mmc->debugfs_root; 861 862 if (!intel_use_ltr(slot->chip)) 863 return; 864 865 debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr); 866 debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr); 867 868 intel_cache_ltr(slot); 869 } 870 871 static int byt_add_host(struct sdhci_pci_slot *slot) 872 { 873 int ret = sdhci_add_host(slot->host); 874 875 if (!ret) 876 byt_add_debugfs(slot); 877 return ret; 878 } 879 880 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead) 881 { 882 struct mmc_host *mmc = slot->host->mmc; 883 884 if (!mmc->slotno) 885 intel_ltr_hide(slot->chip); 886 } 887 888 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 889 { 890 byt_probe_slot(slot); 891 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 892 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | 893 MMC_CAP_CMD_DURING_TFR | 894 MMC_CAP_WAIT_WHILE_BUSY; 895 slot->hw_reset = sdhci_pci_int_hw_reset; 896 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC) 897 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */ 898 slot->host->mmc_host_ops.select_drive_strength = 899 intel_select_drive_strength; 900 return 0; 901 } 902 903 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) 904 { 905 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && 906 (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || 907 dmi_match(DMI_SYS_VENDOR, "IRBIS")); 908 } 909 910 static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) 911 { 912 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC && 913 dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC."); 914 } 915 916 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) 917 { 918 int ret = byt_emmc_probe_slot(slot); 919 920 if (!glk_broken_cqhci(slot)) 921 slot->host->mmc->caps2 |= MMC_CAP2_CQE; 922 923 if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { 924 if (!jsl_broken_hs400es(slot)) { 925 slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES; 926 slot->host->mmc_host_ops.hs400_enhanced_strobe = 927 intel_hs400_enhanced_strobe; 928 } 929 slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; 930 } 931 932 return ret; 933 } 934 935 static const struct cqhci_host_ops glk_cqhci_ops = { 936 .enable = sdhci_cqe_enable, 937 .disable = sdhci_cqe_disable, 938 .dumpregs = sdhci_pci_dumpregs, 939 }; 940 941 static int glk_emmc_add_host(struct sdhci_pci_slot *slot) 942 { 943 struct device *dev = &slot->chip->pdev->dev; 944 struct sdhci_host *host = slot->host; 945 struct cqhci_host *cq_host; 946 bool dma64; 947 int ret; 948 949 ret = sdhci_setup_host(host); 950 if (ret) 951 return ret; 952 953 cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL); 954 if (!cq_host) { 955 ret = -ENOMEM; 956 goto cleanup; 957 } 958 959 cq_host->mmio = host->ioaddr + 0x200; 960 cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; 961 cq_host->ops = &glk_cqhci_ops; 962 963 dma64 = host->flags & SDHCI_USE_64_BIT_DMA; 964 if (dma64) 965 cq_host->caps |= CQHCI_TASK_DESC_SZ_128; 966 967 ret = cqhci_init(cq_host, host->mmc, dma64); 968 if (ret) 969 goto cleanup; 970 971 ret = __sdhci_add_host(host); 972 if (ret) 973 goto cleanup; 974 975 byt_add_debugfs(slot); 976 977 return 0; 978 979 cleanup: 980 sdhci_cleanup_host(host); 981 return ret; 982 } 983 984 #ifdef CONFIG_PM 985 #define GLK_RX_CTRL1 0x834 986 #define GLK_TUN_VAL 0x840 987 #define GLK_PATH_PLL GENMASK(13, 8) 988 #define GLK_DLY 
GENMASK(6, 0) 989 /* Workaround firmware failing to restore the tuning value */ 990 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp) 991 { 992 struct sdhci_pci_slot *slot = chip->slots[0]; 993 struct intel_host *intel_host = sdhci_pci_priv(slot); 994 struct sdhci_host *host = slot->host; 995 u32 glk_rx_ctrl1; 996 u32 glk_tun_val; 997 u32 dly; 998 999 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc)) 1000 return; 1001 1002 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1); 1003 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL); 1004 1005 if (susp) { 1006 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1; 1007 intel_host->glk_tun_val = glk_tun_val; 1008 return; 1009 } 1010 1011 if (!intel_host->glk_tun_val) 1012 return; 1013 1014 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) { 1015 intel_host->rpm_retune_ok = true; 1016 return; 1017 } 1018 1019 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) + 1020 (intel_host->glk_tun_val << 1)); 1021 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1)) 1022 return; 1023 1024 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly; 1025 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1); 1026 1027 intel_host->rpm_retune_ok = true; 1028 chip->rpm_retune = true; 1029 mmc_retune_needed(host->mmc); 1030 pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc)); 1031 } 1032 1033 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp) 1034 { 1035 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && 1036 !chip->rpm_retune) 1037 glk_rpm_retune_wa(chip, susp); 1038 } 1039 1040 static int glk_runtime_suspend(struct sdhci_pci_chip *chip) 1041 { 1042 glk_rpm_retune_chk(chip, true); 1043 1044 return sdhci_cqhci_runtime_suspend(chip); 1045 } 1046 1047 static int glk_runtime_resume(struct sdhci_pci_chip *chip) 1048 { 1049 glk_rpm_retune_chk(chip, false); 1050 1051 return sdhci_cqhci_runtime_resume(chip); 1052 } 1053 #endif 1054 1055 #ifdef CONFIG_ACPI 1056 static int ni_set_max_freq(struct sdhci_pci_slot *slot) 1057 { 1058 acpi_status status; 1059 unsigned long long max_freq; 1060 1061 status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev), 1062 "MXFQ", NULL, &max_freq); 1063 if (ACPI_FAILURE(status)) { 1064 dev_err(&slot->chip->pdev->dev, 1065 "MXFQ not found in acpi table\n"); 1066 return -EINVAL; 1067 } 1068 1069 slot->host->mmc->f_max = max_freq * 1000000; 1070 1071 return 0; 1072 } 1073 #else 1074 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot) 1075 { 1076 return 0; 1077 } 1078 #endif 1079 1080 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) 1081 { 1082 int err; 1083 1084 byt_probe_slot(slot); 1085 1086 err = ni_set_max_freq(slot); 1087 if (err) 1088 return err; 1089 1090 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | 1091 MMC_CAP_WAIT_WHILE_BUSY; 1092 return 0; 1093 } 1094 1095 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) 1096 { 1097 byt_probe_slot(slot); 1098 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | 1099 MMC_CAP_WAIT_WHILE_BUSY; 1100 return 0; 1101 } 1102 1103 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot) 1104 { 1105 struct intel_host *intel_host = sdhci_pci_priv(slot); 1106 u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL); 1107 1108 intel_host->needs_pwr_off = reg & SDHCI_POWER_ON; 1109 } 1110 1111 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) 1112 { 1113 byt_probe_slot(slot); 1114 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | 1115 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; 
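	/*
	 * Use the first "cd" GPIO for card detect; sdhci_pci_probe_slot()
	 * requests it via mmc_gpiod_request_cd() and passes along the
	 * cd_override_level flag set below.
	 */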
1116 slot->cd_idx = 0; 1117 slot->cd_override_level = true; 1118 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || 1119 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD || 1120 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD || 1121 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD) 1122 slot->host->mmc_host_ops.get_cd = bxt_get_cd; 1123 1124 if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI && 1125 slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3) 1126 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V; 1127 1128 byt_needs_pwr_off(slot); 1129 1130 return 0; 1131 } 1132 1133 #ifdef CONFIG_PM_SLEEP 1134 1135 static int byt_resume(struct sdhci_pci_chip *chip) 1136 { 1137 byt_ocp_setting(chip->pdev); 1138 1139 return sdhci_pci_resume_host(chip); 1140 } 1141 1142 #endif 1143 1144 #ifdef CONFIG_PM 1145 1146 static int byt_runtime_resume(struct sdhci_pci_chip *chip) 1147 { 1148 byt_ocp_setting(chip->pdev); 1149 1150 return sdhci_pci_runtime_resume_host(chip); 1151 } 1152 1153 #endif 1154 1155 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { 1156 #ifdef CONFIG_PM_SLEEP 1157 .resume = byt_resume, 1158 #endif 1159 #ifdef CONFIG_PM 1160 .runtime_resume = byt_runtime_resume, 1161 #endif 1162 .allow_runtime_pm = true, 1163 .probe_slot = byt_emmc_probe_slot, 1164 .add_host = byt_add_host, 1165 .remove_slot = byt_remove_slot, 1166 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1167 SDHCI_QUIRK_NO_LED, 1168 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1169 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | 1170 SDHCI_QUIRK2_STOP_WITH_TC, 1171 .ops = &sdhci_intel_byt_ops, 1172 .priv_size = sizeof(struct intel_host), 1173 }; 1174 1175 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { 1176 .allow_runtime_pm = true, 1177 .probe_slot = glk_emmc_probe_slot, 1178 .add_host = glk_emmc_add_host, 1179 .remove_slot = byt_remove_slot, 1180 #ifdef CONFIG_PM_SLEEP 1181 .suspend = sdhci_cqhci_suspend, 1182 .resume = sdhci_cqhci_resume, 1183 #endif 1184 #ifdef CONFIG_PM 1185 .runtime_suspend = glk_runtime_suspend, 1186 .runtime_resume = glk_runtime_resume, 1187 #endif 1188 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1189 SDHCI_QUIRK_NO_LED, 1190 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1191 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | 1192 SDHCI_QUIRK2_STOP_WITH_TC, 1193 .ops = &sdhci_intel_glk_ops, 1194 .priv_size = sizeof(struct intel_host), 1195 }; 1196 1197 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = { 1198 #ifdef CONFIG_PM_SLEEP 1199 .resume = byt_resume, 1200 #endif 1201 #ifdef CONFIG_PM 1202 .runtime_resume = byt_runtime_resume, 1203 #endif 1204 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1205 SDHCI_QUIRK_NO_LED, 1206 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON | 1207 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1208 .allow_runtime_pm = true, 1209 .probe_slot = ni_byt_sdio_probe_slot, 1210 .add_host = byt_add_host, 1211 .remove_slot = byt_remove_slot, 1212 .ops = &sdhci_intel_byt_ops, 1213 .priv_size = sizeof(struct intel_host), 1214 }; 1215 1216 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { 1217 #ifdef CONFIG_PM_SLEEP 1218 .resume = byt_resume, 1219 #endif 1220 #ifdef CONFIG_PM 1221 .runtime_resume = byt_runtime_resume, 1222 #endif 1223 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1224 SDHCI_QUIRK_NO_LED, 1225 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON | 1226 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1227 .allow_runtime_pm = true, 1228 .probe_slot = byt_sdio_probe_slot, 1229 .add_host = byt_add_host, 1230 .remove_slot = byt_remove_slot, 1231 .ops = 
&sdhci_intel_byt_ops, 1232 .priv_size = sizeof(struct intel_host), 1233 }; 1234 1235 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { 1236 #ifdef CONFIG_PM_SLEEP 1237 .resume = byt_resume, 1238 #endif 1239 #ifdef CONFIG_PM 1240 .runtime_resume = byt_runtime_resume, 1241 #endif 1242 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | 1243 SDHCI_QUIRK_NO_LED, 1244 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | 1245 SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1246 SDHCI_QUIRK2_STOP_WITH_TC, 1247 .allow_runtime_pm = true, 1248 .own_cd_for_runtime_pm = true, 1249 .probe_slot = byt_sd_probe_slot, 1250 .add_host = byt_add_host, 1251 .remove_slot = byt_remove_slot, 1252 .ops = &sdhci_intel_byt_ops, 1253 .priv_size = sizeof(struct intel_host), 1254 }; 1255 1256 /* Define Host controllers for Intel Merrifield platform */ 1257 #define INTEL_MRFLD_EMMC_0 0 1258 #define INTEL_MRFLD_EMMC_1 1 1259 #define INTEL_MRFLD_SD 2 1260 #define INTEL_MRFLD_SDIO 3 1261 1262 #ifdef CONFIG_ACPI 1263 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) 1264 { 1265 struct acpi_device *device; 1266 1267 device = ACPI_COMPANION(&slot->chip->pdev->dev); 1268 if (device) 1269 acpi_device_fix_up_power_extended(device); 1270 } 1271 #else 1272 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {} 1273 #endif 1274 1275 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) 1276 { 1277 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn); 1278 1279 switch (func) { 1280 case INTEL_MRFLD_EMMC_0: 1281 case INTEL_MRFLD_EMMC_1: 1282 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | 1283 MMC_CAP_8_BIT_DATA | 1284 MMC_CAP_1_8V_DDR; 1285 break; 1286 case INTEL_MRFLD_SD: 1287 slot->cd_idx = 0; 1288 slot->cd_override_level = true; 1289 /* 1290 * There are two PCB designs of SD card slot with the opposite 1291 * card detection sense. Quirk this out by ignoring GPIO state 1292 * completely in the custom ->get_cd() callback. 1293 */ 1294 slot->host->mmc_host_ops.get_cd = mrfld_get_cd; 1295 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; 1296 break; 1297 case INTEL_MRFLD_SDIO: 1298 /* Advertise 2.0v for compatibility with the SDIO card's OCR */ 1299 slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195; 1300 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | 1301 MMC_CAP_POWER_OFF_CARD; 1302 break; 1303 default: 1304 return -ENODEV; 1305 } 1306 1307 intel_mrfld_mmc_fix_up_power_slot(slot); 1308 return 0; 1309 } 1310 1311 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = { 1312 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, 1313 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | 1314 SDHCI_QUIRK2_PRESET_VALUE_BROKEN, 1315 .allow_runtime_pm = true, 1316 .probe_slot = intel_mrfld_mmc_probe_slot, 1317 }; 1318 1319 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) 1320 { 1321 u8 scratch; 1322 int ret; 1323 1324 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch); 1325 if (ret) 1326 return ret; 1327 1328 /* 1329 * Turn PMOS on [bit 0], set over current detection to 2.4 V 1330 * [bit 1:2] and enable over current debouncing [bit 6]. 
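	 * Taken together these bits form the 0x47 mask applied below.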
	 */
	if (on)
		scratch |= 0x47;
	else
		scratch &= ~0x47;

	return pci_write_config_byte(chip->pdev, 0xAE, scratch);
}

static int jmicron_probe(struct sdhci_pci_chip *chip)
{
	int ret;
	u16 mmcdev = 0;

	if (chip->pdev->revision == 0) {
		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
			  SDHCI_QUIRK_32BIT_DMA_SIZE |
			  SDHCI_QUIRK_32BIT_ADMA_SIZE |
			  SDHCI_QUIRK_RESET_AFTER_REQUEST |
			  SDHCI_QUIRK_BROKEN_SMALL_PIO;
	}

	/*
	 * JMicron chips can have two interfaces to the same hardware
	 * in order to work around limitations in Microsoft's driver.
	 * We need to make sure we only bind to one of them.
	 *
	 * This code assumes two things:
	 *
	 * 1. The PCI code adds subfunctions in order.
	 *
	 * 2. The MMC interface has a lower subfunction number
	 *    than the SD interface.
	 */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;

	if (mmcdev) {
		struct pci_dev *sd_dev;

		sd_dev = NULL;
		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
						mmcdev, sd_dev)) != NULL) {
			if ((PCI_SLOT(chip->pdev->devfn) ==
				PCI_SLOT(sd_dev->devfn)) &&
				(chip->pdev->bus == sd_dev->bus))
				break;
		}

		if (sd_dev) {
			pci_dev_put(sd_dev);
			dev_info(&chip->pdev->dev, "Refusing to bind to "
				"secondary interface.\n");
			return -ENODEV;
		}
	}

	/*
	 * JMicron chips need a bit of a nudge to enable the power
	 * output pins.
	 */
	ret = jmicron_pmos(chip, 1);
	if (ret) {
		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
		return ret;
	}

	/* quirk for unstable RO-detection on JM388 chips */
	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;

	return 0;
}

static void jmicron_enable_mmc(struct sdhci_host *host, int on)
{
	u8 scratch;

	scratch = readb(host->ioaddr + 0xC0);

	if (on)
		scratch |= 0x01;
	else
		scratch &= ~0x01;

	writeb(scratch, host->ioaddr + 0xC0);
}

static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
{
	if (slot->chip->pdev->revision == 0) {
		u16 version;

		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
		version = (version & SDHCI_VENDOR_VER_MASK) >>
			SDHCI_VENDOR_VER_SHIFT;

		/*
		 * Older versions of the chip have lots of nasty glitches
		 * in the ADMA engine. It's best just to avoid it
		 * completely.
		 */
		if (version < 0xAC)
			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
	}

	/* JM388 MMC doesn't support 1.8V while SD supports it */
	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31 |
			MMC_VDD_165_195; /* allow 1.8V */
		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
	}

	/*
	 * The secondary interface requires a bit set to get the
	 * interrupts.
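	 * jmicron_enable_mmc() above sets that bit (bit 0 of the
	 * vendor-specific register at offset 0xC0).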
1452 */ 1453 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1454 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) 1455 jmicron_enable_mmc(slot->host, 1); 1456 1457 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST; 1458 1459 return 0; 1460 } 1461 1462 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) 1463 { 1464 if (dead) 1465 return; 1466 1467 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1468 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) 1469 jmicron_enable_mmc(slot->host, 0); 1470 } 1471 1472 #ifdef CONFIG_PM_SLEEP 1473 static int jmicron_suspend(struct sdhci_pci_chip *chip) 1474 { 1475 int i, ret; 1476 1477 ret = sdhci_pci_suspend_host(chip); 1478 if (ret) 1479 return ret; 1480 1481 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1482 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 1483 for (i = 0; i < chip->num_slots; i++) 1484 jmicron_enable_mmc(chip->slots[i]->host, 0); 1485 } 1486 1487 return 0; 1488 } 1489 1490 static int jmicron_resume(struct sdhci_pci_chip *chip) 1491 { 1492 int ret, i; 1493 1494 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 1495 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 1496 for (i = 0; i < chip->num_slots; i++) 1497 jmicron_enable_mmc(chip->slots[i]->host, 1); 1498 } 1499 1500 ret = jmicron_pmos(chip, 1); 1501 if (ret) { 1502 dev_err(&chip->pdev->dev, "Failure enabling card power\n"); 1503 return ret; 1504 } 1505 1506 return sdhci_pci_resume_host(chip); 1507 } 1508 #endif 1509 1510 static const struct sdhci_pci_fixes sdhci_jmicron = { 1511 .probe = jmicron_probe, 1512 1513 .probe_slot = jmicron_probe_slot, 1514 .remove_slot = jmicron_remove_slot, 1515 1516 #ifdef CONFIG_PM_SLEEP 1517 .suspend = jmicron_suspend, 1518 .resume = jmicron_resume, 1519 #endif 1520 }; 1521 1522 /* SysKonnect CardBus2SDIO extra registers */ 1523 #define SYSKT_CTRL 0x200 1524 #define SYSKT_RDFIFO_STAT 0x204 1525 #define SYSKT_WRFIFO_STAT 0x208 1526 #define SYSKT_POWER_DATA 0x20c 1527 #define SYSKT_POWER_330 0xef 1528 #define SYSKT_POWER_300 0xf8 1529 #define SYSKT_POWER_184 0xcc 1530 #define SYSKT_POWER_CMD 0x20d 1531 #define SYSKT_POWER_START (1 << 7) 1532 #define SYSKT_POWER_STATUS 0x20e 1533 #define SYSKT_POWER_STATUS_OK (1 << 0) 1534 #define SYSKT_BOARD_REV 0x210 1535 #define SYSKT_CHIP_REV 0x211 1536 #define SYSKT_CONF_DATA 0x212 1537 #define SYSKT_CONF_DATA_1V8 (1 << 2) 1538 #define SYSKT_CONF_DATA_2V5 (1 << 1) 1539 #define SYSKT_CONF_DATA_3V3 (1 << 0) 1540 1541 static int syskt_probe(struct sdhci_pci_chip *chip) 1542 { 1543 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { 1544 chip->pdev->class &= ~0x0000FF; 1545 chip->pdev->class |= PCI_SDHCI_IFDMA; 1546 } 1547 return 0; 1548 } 1549 1550 static int syskt_probe_slot(struct sdhci_pci_slot *slot) 1551 { 1552 int tm, ps; 1553 1554 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV); 1555 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV); 1556 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, " 1557 "board rev %d.%d, chip rev %d.%d\n", 1558 board_rev >> 4, board_rev & 0xf, 1559 chip_rev >> 4, chip_rev & 0xf); 1560 if (chip_rev >= 0x20) 1561 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA; 1562 1563 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA); 1564 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD); 1565 udelay(50); 1566 tm = 10; /* Wait max 1 ms */ 1567 do { 1568 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS); 1569 if (ps 
& SYSKT_POWER_STATUS_OK) 1570 break; 1571 udelay(100); 1572 } while (--tm); 1573 if (!tm) { 1574 dev_err(&slot->chip->pdev->dev, 1575 "power regulator never stabilized"); 1576 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD); 1577 return -ENODEV; 1578 } 1579 1580 return 0; 1581 } 1582 1583 static const struct sdhci_pci_fixes sdhci_syskt = { 1584 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER, 1585 .probe = syskt_probe, 1586 .probe_slot = syskt_probe_slot, 1587 }; 1588 1589 static int via_probe(struct sdhci_pci_chip *chip) 1590 { 1591 if (chip->pdev->revision == 0x10) 1592 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; 1593 1594 return 0; 1595 } 1596 1597 static const struct sdhci_pci_fixes sdhci_via = { 1598 .probe = via_probe, 1599 }; 1600 1601 static int rtsx_probe_slot(struct sdhci_pci_slot *slot) 1602 { 1603 slot->host->mmc->caps2 |= MMC_CAP2_HS200; 1604 return 0; 1605 } 1606 1607 static const struct sdhci_pci_fixes sdhci_rtsx = { 1608 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | 1609 SDHCI_QUIRK2_BROKEN_64_BIT_DMA | 1610 SDHCI_QUIRK2_BROKEN_DDR50, 1611 .probe_slot = rtsx_probe_slot, 1612 }; 1613 1614 /*AMD chipset generation*/ 1615 enum amd_chipset_gen { 1616 AMD_CHIPSET_BEFORE_ML, 1617 AMD_CHIPSET_CZ, 1618 AMD_CHIPSET_NL, 1619 AMD_CHIPSET_UNKNOWN, 1620 }; 1621 1622 /* AMD registers */ 1623 #define AMD_SD_AUTO_PATTERN 0xB8 1624 #define AMD_MSLEEP_DURATION 4 1625 #define AMD_SD_MISC_CONTROL 0xD0 1626 #define AMD_MAX_TUNE_VALUE 0x0B 1627 #define AMD_AUTO_TUNE_SEL 0x10800 1628 #define AMD_FIFO_PTR 0x30 1629 #define AMD_BIT_MASK 0x1F 1630 1631 static void amd_tuning_reset(struct sdhci_host *host) 1632 { 1633 unsigned int val; 1634 1635 val = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1636 val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING; 1637 sdhci_writew(host, val, SDHCI_HOST_CONTROL2); 1638 1639 val = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1640 val &= ~SDHCI_CTRL_EXEC_TUNING; 1641 sdhci_writew(host, val, SDHCI_HOST_CONTROL2); 1642 } 1643 1644 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase) 1645 { 1646 unsigned int val; 1647 1648 pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val); 1649 val &= ~AMD_BIT_MASK; 1650 val |= (AMD_AUTO_TUNE_SEL | (phase << 1)); 1651 pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val); 1652 } 1653 1654 static void amd_enable_manual_tuning(struct pci_dev *pdev) 1655 { 1656 unsigned int val; 1657 1658 pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val); 1659 val |= AMD_FIFO_PTR; 1660 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); 1661 } 1662 1663 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) 1664 { 1665 struct sdhci_pci_slot *slot = sdhci_priv(host); 1666 struct pci_dev *pdev = slot->chip->pdev; 1667 u8 valid_win = 0; 1668 u8 valid_win_max = 0; 1669 u8 valid_win_end = 0; 1670 u8 ctrl, tune_around; 1671 1672 amd_tuning_reset(host); 1673 1674 for (tune_around = 0; tune_around < 12; tune_around++) { 1675 amd_config_tuning_phase(pdev, tune_around); 1676 1677 if (mmc_send_tuning(host->mmc, opcode, NULL)) { 1678 valid_win = 0; 1679 msleep(AMD_MSLEEP_DURATION); 1680 ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA; 1681 sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET); 1682 } else if (++valid_win > valid_win_max) { 1683 valid_win_max = valid_win; 1684 valid_win_end = tune_around; 1685 } 1686 } 1687 1688 if (!valid_win_max) { 1689 dev_err(&pdev->dev, "no tuning point found\n"); 1690 return -EIO; 1691 } 1692 1693 amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2); 1694 1695 
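	/*
	 * The centre of the longest passing window was programmed above;
	 * enabling manual tuning (AMD_FIFO_PTR in AMD_SD_MISC_CONTROL)
	 * appears intended to keep that phase in use from now on.
	 */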
amd_enable_manual_tuning(pdev); 1696 1697 host->mmc->retune_period = 0; 1698 1699 return 0; 1700 } 1701 1702 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) 1703 { 1704 struct sdhci_host *host = mmc_priv(mmc); 1705 1706 /* AMD requires custom HS200 tuning */ 1707 if (host->timing == MMC_TIMING_MMC_HS200) 1708 return amd_execute_tuning_hs200(host, opcode); 1709 1710 /* Otherwise perform standard SDHCI tuning */ 1711 return sdhci_execute_tuning(mmc, opcode); 1712 } 1713 1714 static int amd_probe_slot(struct sdhci_pci_slot *slot) 1715 { 1716 struct mmc_host_ops *ops = &slot->host->mmc_host_ops; 1717 1718 ops->execute_tuning = amd_execute_tuning; 1719 1720 return 0; 1721 } 1722 1723 static int amd_probe(struct sdhci_pci_chip *chip) 1724 { 1725 struct pci_dev *smbus_dev; 1726 enum amd_chipset_gen gen; 1727 1728 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 1729 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); 1730 if (smbus_dev) { 1731 gen = AMD_CHIPSET_BEFORE_ML; 1732 } else { 1733 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 1734 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL); 1735 if (smbus_dev) { 1736 if (smbus_dev->revision < 0x51) 1737 gen = AMD_CHIPSET_CZ; 1738 else 1739 gen = AMD_CHIPSET_NL; 1740 } else { 1741 gen = AMD_CHIPSET_UNKNOWN; 1742 } 1743 } 1744 1745 pci_dev_put(smbus_dev); 1746 1747 if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ) 1748 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; 1749 1750 return 0; 1751 } 1752 1753 static u32 sdhci_read_present_state(struct sdhci_host *host) 1754 { 1755 return sdhci_readl(host, SDHCI_PRESENT_STATE); 1756 } 1757 1758 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) 1759 { 1760 struct sdhci_pci_slot *slot = sdhci_priv(host); 1761 struct pci_dev *pdev = slot->chip->pdev; 1762 u32 present_state; 1763 1764 /* 1765 * SDHC 0x7906 requires a hard reset to clear all internal state. 1766 * Otherwise it can get into a bad state where the DATA lines are always 1767 * read as zeros. 1768 */ 1769 if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) { 1770 pci_clear_master(pdev); 1771 1772 pci_save_state(pdev); 1773 1774 pci_set_power_state(pdev, PCI_D3cold); 1775 pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc), 1776 pdev->current_state); 1777 pci_set_power_state(pdev, PCI_D0); 1778 1779 pci_restore_state(pdev); 1780 1781 /* 1782 * SDHCI_RESET_ALL says the card detect logic should not be 1783 * reset, but since we need to reset the entire controller 1784 * we should wait until the card detect logic has stabilized. 1785 * 1786 * This normally takes about 40ms. 
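		 * The readx_poll_timeout() below polls SDHCI_PRESENT_STATE for
		 * SDHCI_CD_STABLE every 10 ms and gives up after 100 ms.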
1787 */ 1788 readx_poll_timeout( 1789 sdhci_read_present_state, 1790 host, 1791 present_state, 1792 present_state & SDHCI_CD_STABLE, 1793 10000, 1794 100000 1795 ); 1796 } 1797 1798 return sdhci_reset(host, mask); 1799 } 1800 1801 static const struct sdhci_ops amd_sdhci_pci_ops = { 1802 .set_clock = sdhci_set_clock, 1803 .enable_dma = sdhci_pci_enable_dma, 1804 .set_bus_width = sdhci_set_bus_width, 1805 .reset = amd_sdhci_reset, 1806 .set_uhs_signaling = sdhci_set_uhs_signaling, 1807 }; 1808 1809 static const struct sdhci_pci_fixes sdhci_amd = { 1810 .probe = amd_probe, 1811 .ops = &amd_sdhci_pci_ops, 1812 .probe_slot = amd_probe_slot, 1813 }; 1814 1815 static const struct pci_device_id pci_ids[] = { 1816 SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh), 1817 SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc), 1818 SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc), 1819 SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc), 1820 SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712), 1821 SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712), 1822 SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714), 1823 SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714), 1824 SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe), 1825 SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron), 1826 SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron), 1827 SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron), 1828 SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron), 1829 SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt), 1830 SDHCI_PCI_DEVICE(VIA, 95D0, via), 1831 SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx), 1832 SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk), 1833 SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0), 1834 SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2), 1835 SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2), 1836 SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd), 1837 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio), 1838 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio), 1839 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc), 1840 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc), 1841 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio), 1842 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio), 1843 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc), 1844 SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio), 1845 SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio), 1846 SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd), 1847 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc), 1848 SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc), 1849 SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio), 1850 SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd), 1851 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd), 1852 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio), 1853 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio), 1854 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc), 1855 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc), 1856 SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc), 1857 SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc), 1858 SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio), 1859 SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd), 1860 SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc), 1861 SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc), 1862 SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc), 1863 SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio), 1864 SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd), 1865 SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc), 1866 SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio), 1867 SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd), 1868 SDHCI_PCI_DEVICE(INTEL, APL_EMMC, 
intel_byt_emmc), 1869 SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio), 1870 SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd), 1871 SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc), 1872 SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio), 1873 SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd), 1874 SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc), 1875 SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd), 1876 SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd), 1877 SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc), 1878 SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd), 1879 SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc), 1880 SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd), 1881 SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc), 1882 SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd), 1883 SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd), 1884 SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc), 1885 SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd), 1886 SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc), 1887 SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd), 1888 SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc), 1889 SDHCI_PCI_DEVICE(O2, 8120, o2), 1890 SDHCI_PCI_DEVICE(O2, 8220, o2), 1891 SDHCI_PCI_DEVICE(O2, 8221, o2), 1892 SDHCI_PCI_DEVICE(O2, 8320, o2), 1893 SDHCI_PCI_DEVICE(O2, 8321, o2), 1894 SDHCI_PCI_DEVICE(O2, FUJIN2, o2), 1895 SDHCI_PCI_DEVICE(O2, SDS0, o2), 1896 SDHCI_PCI_DEVICE(O2, SDS1, o2), 1897 SDHCI_PCI_DEVICE(O2, SEABIRD0, o2), 1898 SDHCI_PCI_DEVICE(O2, SEABIRD1, o2), 1899 SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan), 1900 SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps), 1901 SDHCI_PCI_DEVICE(GLI, 9750, gl9750), 1902 SDHCI_PCI_DEVICE(GLI, 9755, gl9755), 1903 SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e), 1904 SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd), 1905 /* Generic SD host controller */ 1906 {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)}, 1907 { /* end: all zeroes */ }, 1908 }; 1909 1910 MODULE_DEVICE_TABLE(pci, pci_ids); 1911 1912 /*****************************************************************************\ 1913 * * 1914 * SDHCI core callbacks * 1915 * * 1916 \*****************************************************************************/ 1917 1918 int sdhci_pci_enable_dma(struct sdhci_host *host) 1919 { 1920 struct sdhci_pci_slot *slot; 1921 struct pci_dev *pdev; 1922 1923 slot = sdhci_priv(host); 1924 pdev = slot->chip->pdev; 1925 1926 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && 1927 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && 1928 (host->flags & SDHCI_USE_SDMA)) { 1929 dev_warn(&pdev->dev, "Will use DMA mode even though HW " 1930 "doesn't fully claim to support it.\n"); 1931 } 1932 1933 pci_set_master(pdev); 1934 1935 return 0; 1936 } 1937 1938 static void sdhci_pci_hw_reset(struct sdhci_host *host) 1939 { 1940 struct sdhci_pci_slot *slot = sdhci_priv(host); 1941 1942 if (slot->hw_reset) 1943 slot->hw_reset(host); 1944 } 1945 1946 static const struct sdhci_ops sdhci_pci_ops = { 1947 .set_clock = sdhci_set_clock, 1948 .enable_dma = sdhci_pci_enable_dma, 1949 .set_bus_width = sdhci_set_bus_width, 1950 .reset = sdhci_reset, 1951 .set_uhs_signaling = sdhci_set_uhs_signaling, 1952 .hw_reset = sdhci_pci_hw_reset, 1953 }; 1954 1955 /*****************************************************************************\ 1956 * * 1957 * Suspend/resume * 1958 * * 1959 \*****************************************************************************/ 1960 1961 #ifdef CONFIG_PM_SLEEP 1962 static int sdhci_pci_suspend(struct device *dev) 1963 { 1964 struct sdhci_pci_chip *chip = 
dev_get_drvdata(dev); 1965 1966 if (!chip) 1967 return 0; 1968 1969 if (chip->fixes && chip->fixes->suspend) 1970 return chip->fixes->suspend(chip); 1971 1972 return sdhci_pci_suspend_host(chip); 1973 } 1974 1975 static int sdhci_pci_resume(struct device *dev) 1976 { 1977 struct sdhci_pci_chip *chip = dev_get_drvdata(dev); 1978 1979 if (!chip) 1980 return 0; 1981 1982 if (chip->fixes && chip->fixes->resume) 1983 return chip->fixes->resume(chip); 1984 1985 return sdhci_pci_resume_host(chip); 1986 } 1987 #endif 1988 1989 #ifdef CONFIG_PM 1990 static int sdhci_pci_runtime_suspend(struct device *dev) 1991 { 1992 struct sdhci_pci_chip *chip = dev_get_drvdata(dev); 1993 1994 if (!chip) 1995 return 0; 1996 1997 if (chip->fixes && chip->fixes->runtime_suspend) 1998 return chip->fixes->runtime_suspend(chip); 1999 2000 return sdhci_pci_runtime_suspend_host(chip); 2001 } 2002 2003 static int sdhci_pci_runtime_resume(struct device *dev) 2004 { 2005 struct sdhci_pci_chip *chip = dev_get_drvdata(dev); 2006 2007 if (!chip) 2008 return 0; 2009 2010 if (chip->fixes && chip->fixes->runtime_resume) 2011 return chip->fixes->runtime_resume(chip); 2012 2013 return sdhci_pci_runtime_resume_host(chip); 2014 } 2015 #endif 2016 2017 static const struct dev_pm_ops sdhci_pci_pm_ops = { 2018 SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume) 2019 SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend, 2020 sdhci_pci_runtime_resume, NULL) 2021 }; 2022 2023 /*****************************************************************************\ 2024 * * 2025 * Device probing/removal * 2026 * * 2027 \*****************************************************************************/ 2028 2029 static struct sdhci_pci_slot *sdhci_pci_probe_slot( 2030 struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, 2031 int slotno) 2032 { 2033 struct sdhci_pci_slot *slot; 2034 struct sdhci_host *host; 2035 int ret, bar = first_bar + slotno; 2036 size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0; 2037 2038 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { 2039 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar); 2040 return ERR_PTR(-ENODEV); 2041 } 2042 2043 if (pci_resource_len(pdev, bar) < 0x100) { 2044 dev_err(&pdev->dev, "Invalid iomem size. You may " 2045 "experience problems.\n"); 2046 } 2047 2048 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) { 2049 dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n"); 2050 return ERR_PTR(-ENODEV); 2051 } 2052 2053 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) { 2054 dev_err(&pdev->dev, "Unknown interface. Aborting.\n"); 2055 return ERR_PTR(-ENODEV); 2056 } 2057 2058 host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size); 2059 if (IS_ERR(host)) { 2060 dev_err(&pdev->dev, "cannot allocate host\n"); 2061 return ERR_CAST(host); 2062 } 2063 2064 slot = sdhci_priv(host); 2065 2066 slot->chip = chip; 2067 slot->host = host; 2068 slot->cd_idx = -1; 2069 2070 host->hw_name = "PCI"; 2071 host->ops = chip->fixes && chip->fixes->ops ? 
static struct sdhci_pci_slot *sdhci_pci_probe_slot(
	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
	int slotno)
{
	struct sdhci_pci_slot *slot;
	struct sdhci_host *host;
	int ret, bar = first_bar + slotno;
	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
		return ERR_PTR(-ENODEV);
	}

	if (pci_resource_len(pdev, bar) < 0x100) {
		dev_err(&pdev->dev, "Invalid iomem size. You may experience problems.\n");
	}

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
	if (IS_ERR(host)) {
		dev_err(&pdev->dev, "cannot allocate host\n");
		return ERR_CAST(host);
	}

	slot = sdhci_priv(host);

	slot->chip = chip;
	slot->host = host;
	slot->cd_idx = -1;

	host->hw_name = "PCI";
	host->ops = chip->fixes && chip->fixes->ops ?
		    chip->fixes->ops :
		    &sdhci_pci_ops;
	host->quirks = chip->quirks;
	host->quirks2 = chip->quirks2;

	host->irq = pdev->irq;

	ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
	if (ret) {
		dev_err(&pdev->dev, "cannot request region\n");
		goto cleanup;
	}

	host->ioaddr = pcim_iomap_table(pdev)[bar];

	if (chip->fixes && chip->fixes->probe_slot) {
		ret = chip->fixes->probe_slot(slot);
		if (ret)
			goto cleanup;
	}

	host->mmc->pm_caps = MMC_PM_KEEP_POWER;
	host->mmc->slotno = slotno;
	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;

	if (device_can_wakeup(&pdev->dev))
		host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (host->mmc->caps & MMC_CAP_CD_WAKE)
		device_init_wakeup(&pdev->dev, true);

	if (slot->cd_idx >= 0) {
		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
					   slot->cd_override_level, 0);
		if (ret && ret != -EPROBE_DEFER)
			ret = mmc_gpiod_request_cd(host->mmc, NULL,
						   slot->cd_idx,
						   slot->cd_override_level,
						   0);
		if (ret == -EPROBE_DEFER)
			goto remove;

		if (ret) {
			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
			slot->cd_idx = -1;
		}
	}

	if (chip->fixes && chip->fixes->add_host)
		ret = chip->fixes->add_host(slot);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto remove;

	/*
	 * Check if the chip needs a separate GPIO for card detect to wake up
	 * from runtime suspend. If it is not there, don't allow runtime PM.
	 */
	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
		chip->allow_runtime_pm = false;

	return slot;

remove:
	if (chip->fixes && chip->fixes->remove_slot)
		chip->fixes->remove_slot(slot, 0);

cleanup:
	sdhci_free_host(host);

	return ERR_PTR(ret);
}

static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
	int dead;
	u32 scratch;

	dead = 0;
	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
	if (scratch == (u32)-1)
		dead = 1;

	sdhci_remove_host(slot->host, dead);

	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
		slot->chip->fixes->remove_slot(slot, dead);

	sdhci_free_host(slot->host);
}

static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
	pm_suspend_ignore_children(dev, 1);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	/* Stay active until mmc core scans for a card */
	pm_runtime_put_noidle(dev);
}

static void sdhci_pci_runtime_pm_forbid(struct device *dev)
{
	pm_runtime_forbid(dev);
	pm_runtime_get_noresume(dev);
}

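/*
 * Top-level PCI probe: the standard SDHCI slot information register reports
 * how many slots the controller has and which BAR maps the first one; each
 * slot then gets its own host instance.
 */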
static int sdhci_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;

	u8 slots, first_bar;
	int ret, i;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);

	BUG_ON(slots > MAX_SLOTS);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->pdev = pdev;
	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
	if (chip->fixes) {
		chip->quirks = chip->fixes->quirks;
		chip->quirks2 = chip->fixes->quirks2;
		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
	}
	chip->num_slots = slots;
	chip->pm_retune = true;
	chip->rpm_retune = true;

	pci_set_drvdata(pdev, chip);

	if (chip->fixes && chip->fixes->probe) {
		ret = chip->fixes->probe(chip);
		if (ret)
			return ret;
	}

	slots = chip->num_slots;	/* Quirk may have changed this */

	for (i = 0; i < slots; i++) {
		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
		if (IS_ERR(slot)) {
			for (i--; i >= 0; i--)
				sdhci_pci_remove_slot(chip->slots[i]);
			return PTR_ERR(slot);
		}

		chip->slots[i] = slot;
	}

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_allow(&pdev->dev);

	return 0;
}

static void sdhci_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_forbid(&pdev->dev);

	for (i = 0; i < chip->num_slots; i++)
		sdhci_pci_remove_slot(chip->slots[i]);
}

static struct pci_driver sdhci_driver = {
	.name =		"sdhci-pci",
	.id_table =	pci_ids,
	.probe =	sdhci_pci_probe,
	.remove =	sdhci_pci_remove,
	.driver =	{
		.pm = &sdhci_pci_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_pci_driver(sdhci_driver);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
MODULE_LICENSE("GPL");