// SPDX-License-Identifier: GPL-2.0-only
/*
 * Secure Digital Host Controller Interface ACPI driver.
 *
 * Copyright (c) 2012, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/compiler.h>
#include <linux/stddef.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/dmi.h>

#include <linux/mmc/host.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>

#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#include <linux/pci.h>
#endif

#include "sdhci.h"

enum {
	SDHCI_ACPI_SD_CD = BIT(0),
	SDHCI_ACPI_RUNTIME_PM = BIT(1),
	SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2),
};

struct sdhci_acpi_chip {
	const struct sdhci_ops *ops;
	unsigned int quirks;
	unsigned int quirks2;
	unsigned long caps;
	unsigned int caps2;
	mmc_pm_flag_t pm_caps;
};

struct sdhci_acpi_slot {
	const struct sdhci_acpi_chip *chip;
	unsigned int quirks;
	unsigned int quirks2;
	unsigned long caps;
	unsigned int caps2;
	mmc_pm_flag_t pm_caps;
	unsigned int flags;
	size_t priv_size;
	int (*probe_slot)(struct platform_device *, struct acpi_device *);
	int (*remove_slot)(struct platform_device *);
	int (*free_slot)(struct platform_device *pdev);
	int (*setup_host)(struct platform_device *pdev);
};

struct sdhci_acpi_host {
	struct sdhci_host *host;
	const struct sdhci_acpi_slot *slot;
	struct platform_device *pdev;
	bool use_runtime_pm;
	bool is_intel;
	bool reset_signal_volt_on_suspend;
	unsigned long private[] ____cacheline_aligned;
};

enum {
	DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0),
	DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1),
};

static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
{
	return (void *)c->private;
}

static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
	return c->slot && (c->slot->flags & flag);
}

#define INTEL_DSM_HS_CAPS_SDR25		BIT(0)
#define INTEL_DSM_HS_CAPS_DDR50		BIT(1)
#define INTEL_DSM_HS_CAPS_SDR50		BIT(2)
#define INTEL_DSM_HS_CAPS_SDR104	BIT(3)

enum {
	INTEL_DSM_FNS = 0,
	INTEL_DSM_V18_SWITCH = 3,
	INTEL_DSM_V33_SWITCH = 4,
	INTEL_DSM_HS_CAPS = 8,
};

struct intel_host {
	u32 dsm_fns;
	u32 hs_caps;
};

static const guid_t intel_dsm_guid =
	GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
		  0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);

static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type == ACPI_TYPE_INTEGER) {
		*result = obj->integer.value;
	} else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
		size_t len = min_t(size_t, obj->buffer.length, 4);

		*result = 0;
		memcpy(result, obj->buffer.pointer, len);
	} else {
		dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
			__func__, fn, obj->type, obj->buffer.length);
		err = -EINVAL;
	}

	ACPI_FREE(obj);

	return err;
}

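/*
 * DSM function 0 (INTEL_DSM_FNS) returns a bitmask of the DSM functions the
 * firmware supports; only functions advertised in that mask may be called.
 */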
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}

static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
			   struct mmc_host *mmc)
{
	int err;

	intel_host->hs_caps = ~0;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	if (err) {
		pr_debug("%s: DSM not supported, error %d\n",
			 mmc_hostname(mmc), err);
		return;
	}

	pr_debug("%s: DSM function mask %#x\n",
		 mmc_hostname(mmc), intel_host->dsm_fns);

	intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}

static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct intel_host *intel_host = sdhci_acpi_priv(c);
	unsigned int fn;
	u32 result = 0;
	int err;

	err = sdhci_start_signal_voltage_switch(mmc, ios);
	if (err)
		return err;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		fn = INTEL_DSM_V33_SWITCH;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		fn = INTEL_DSM_V18_SWITCH;
		break;
	default:
		return 0;
	}

	err = intel_dsm(intel_host, dev, fn, &result);
	pr_debug("%s: %s DSM fn %u error %d result %u\n",
		 mmc_hostname(mmc), __func__, fn, err, result);

	return 0;
}

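/*
 * Toggle bit 4 of the power control register, which Intel controllers use
 * as the eMMC hardware reset line, observing the minimum assert and
 * recovery times noted below.
 */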
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
	u8 reg;

	reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
	reg |= 0x10;
	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
	/* For eMMC, minimum is 1us but give it 9us for good measure */
	udelay(9);
	reg &= ~0x10;
	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
	/* For eMMC, minimum is 200us but give it 300us for good measure */
	usleep_range(300, 1000);
}

static const struct sdhci_ops sdhci_acpi_ops_dflt = {
	.set_clock = sdhci_set_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_ops sdhci_acpi_ops_int = {
	.set_clock = sdhci_set_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
	.hw_reset = sdhci_acpi_int_hw_reset,
};

static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
	.ops = &sdhci_acpi_ops_int,
};

#ifdef CONFIG_X86

static bool sdhci_acpi_byt(void)
{
	static const struct x86_cpu_id byt[] = {
		X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
		{}
	};

	return x86_match_cpu(byt);
}

static bool sdhci_acpi_cht(void)
{
	static const struct x86_cpu_id cht[] = {
		X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
		{}
	};

	return x86_match_cpu(cht);
}

#define BYT_IOSF_SCCEP			0x63
#define BYT_IOSF_OCP_NETCTRL0		0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)

static void sdhci_acpi_byt_setting(struct device *dev)
{
	u32 val = 0;

	if (!sdhci_acpi_byt())
		return;

	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
			  &val)) {
		dev_err(dev, "%s read error\n", __func__);
		return;
	}

	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
		return;

	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;

	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
			   val)) {
		dev_err(dev, "%s write error\n", __func__);
		return;
	}

	dev_dbg(dev, "%s completed\n", __func__);
}

static bool sdhci_acpi_byt_defer(struct device *dev)
{
	if (!sdhci_acpi_byt())
		return false;

	if (!iosf_mbi_available())
		return true;

	sdhci_acpi_byt_setting(dev);

	return false;
}

static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
				    unsigned int slot, unsigned int parent_slot)
{
	struct pci_dev *dev, *parent, *from = NULL;

	while (1) {
		dev = pci_get_device(vendor, device, from);
		pci_dev_put(from);
		if (!dev)
			break;
		parent = pci_upstream_bridge(dev);
		if (ACPI_COMPANION(&dev->dev) && PCI_SLOT(dev->devfn) == slot &&
		    parent && PCI_SLOT(parent->devfn) == parent_slot &&
		    !pci_upstream_bridge(parent)) {
			pci_dev_put(dev);
			return true;
		}
		from = dev;
	}

	return false;
}

/*
 * GPDwin uses PCI wifi which conflicts with SDIO's use of
 * acpi_device_fix_up_power() on child device nodes. Identifying GPDwin is
 * problematic, but since SDIO is only used for wifi, the presence of the PCI
 * wifi card in the expected slot with an ACPI companion node is used to
 * indicate that acpi_device_fix_up_power() should be avoided.
 */
static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
	return sdhci_acpi_cht() &&
	       acpi_dev_hid_uid_match(adev, "80860F14", "2") &&
	       sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
}

#else

static inline void sdhci_acpi_byt_setting(struct device *dev)
{
}

static inline bool sdhci_acpi_byt_defer(struct device *dev)
{
	return false;
}

static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
	return false;
}

#endif

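/*
 * Check the card-detect GPIO first and only fall back to polling the
 * controller's present-state register when the GPIO reports a card.
 */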
static int bxt_get_cd(struct mmc_host *mmc)
{
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (!gpio_cd)
		return 0;

	return sdhci_get_cd_nogpio(mmc);
}

static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct intel_host *intel_host = sdhci_acpi_priv(c);
	struct sdhci_host *host = c->host;

	if (acpi_dev_hid_uid_match(adev, "80860F14", "1") &&
	    sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
	    sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
		host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */

	if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL))
		host->mmc_host_ops.get_cd = bxt_get_cd;

	intel_dsm_init(intel_host, &pdev->dev, host->mmc);

	host->mmc_host_ops.start_signal_voltage_switch =
					intel_start_signal_voltage_switch;

	c->is_intel = true;

	return 0;
}

static int intel_setup_host(struct platform_device *pdev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct intel_host *intel_host = sdhci_acpi_priv(c);

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
		c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
		c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
		c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
		c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;

	return 0;
}

static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
	.chip = &sdhci_acpi_chip_int,
	.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
		MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
		MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY,
	.flags = SDHCI_ACPI_RUNTIME_PM,
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_STOP_WITH_TC |
		   SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
	.probe_slot = intel_probe_slot,
	.setup_host = intel_setup_host,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_NO_LED |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
	.caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
		MMC_CAP_WAIT_WHILE_BUSY,
	.flags = SDHCI_ACPI_RUNTIME_PM,
	.pm_caps = MMC_PM_KEEP_POWER,
	.probe_slot = intel_probe_slot,
	.setup_host = intel_setup_host,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
	.flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL |
		 SDHCI_ACPI_RUNTIME_PM,
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
		   SDHCI_QUIRK2_STOP_WITH_TC,
	.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
	.probe_slot = intel_probe_slot,
	.setup_host = intel_setup_host,
	.priv_size = sizeof(struct intel_host),
};

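/*
 * Qualcomm controllers deliver bus-power requests on a second interrupt;
 * the handler below clears the pending request and completes it through
 * the vendor power-control registers.
 */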
#define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG	0x1a8
#define VENDOR_SPECIFIC_PWRCTL_CTL_REG		0x1ac
static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
{
	struct sdhci_host *host = ptr;

	sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG);
	sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG);

	return IRQ_HANDLED;
}

static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct sdhci_host *host = c->host;
	int *irq = sdhci_acpi_priv(c);

	*irq = -EINVAL;

	if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
		return 0;

	*irq = platform_get_irq(pdev, 1);
	if (*irq < 0)
		return 0;

	return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler,
				    IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
				    "sdhci_qcom", host);
}

static int qcom_free_slot(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct sdhci_host *host = c->host;
	struct acpi_device *adev;
	int *irq = sdhci_acpi_priv(c);

	adev = ACPI_COMPANION(dev);
	if (!adev)
		return -ENODEV;

	if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
		return 0;

	if (*irq < 0)
		return 0;

	free_irq(*irq, host);
	return 0;
}

static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
	.quirks2 = SDHCI_QUIRK2_NO_1_8_V,
	.caps = MMC_CAP_NONREMOVABLE,
	.priv_size = sizeof(int),
	.probe_slot = qcom_probe_slot,
	.free_slot = qcom_free_slot,
};

static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
	.caps = MMC_CAP_NONREMOVABLE,
};

struct amd_sdhci_host {
	bool	tuned_clock;
	bool	dll_enabled;
};

/* AMD sdhci reset dll register. */
#define SDHCI_AMD_RESET_DLL_REGISTER	0x908

static int amd_select_drive_strength(struct mmc_card *card,
				     unsigned int max_dtr, int host_drv,
				     int card_drv, int *host_driver_strength)
{
	struct sdhci_host *host = mmc_priv(card->host);
	u16 preset, preset_driver_strength;

	/*
	 * This method is only called by mmc_select_hs200 so we only need to
	 * read from the HS200 (SDR104) preset register.
	 *
	 * Firmware that has "invalid/default" presets returns a driver
	 * strength of A. This matches the previously hard coded value.
	 */
	preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
	preset_driver_strength = FIELD_GET(SDHCI_PRESET_DRV_MASK, preset);

	/*
	 * We want the controller driver strength to match the card's driver
	 * strength so they have similar rise/fall times.
	 *
	 * The controller driver strength set by this method is sticky for all
	 * timings after this method is called. This unfortunately means that
	 * while HS400 tuning is in progress we end up with mismatched driver
	 * strengths between the controller and the card. HS400 tuning requires
	 * switching from HS400->DDR52->HS->HS200->HS400. So the driver mismatch
	 * happens while in DDR52 and HS modes. This has not been observed to
	 * cause problems. Enabling presets would fix this issue.
	 */
	*host_driver_strength = preset_driver_strength;

	/*
	 * The resulting card driver strength is only set when switching the
	 * card's timing to HS200 or HS400. The card will use the default driver
	 * strength (B) for any other mode.
	 */
	return preset_driver_strength;
}

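/*
 * Vendor-specific DLL control values: the first write holds the DLL in
 * reset and, when enabling, the second write releases it after a short
 * settling delay.
 */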
static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
{
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);

	/* AMD Platform requires dll setting */
	sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
	usleep_range(10, 20);
	if (enable)
		sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);

	amd_host->dll_enabled = enable;
}

/*
 * The initialization sequence for HS400 is:
 *     HS->HS200->Perform Tuning->HS->HS400
 *
 * The re-tuning sequence is:
 *     HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400
 *
 * The AMD eMMC Controller can only use the tuned clock while in HS200 and
 * HS400 mode. If we switch to a different mode, we need to disable the tuned
 * clock. If we have previously performed tuning and switch back to HS200 or
 * HS400, we can re-enable the tuned clock.
 */
static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
	unsigned int old_timing = host->timing;
	u16 val;

	sdhci_set_ios(mmc, ios);

	if (old_timing != host->timing && amd_host->tuned_clock) {
		if (host->timing == MMC_TIMING_MMC_HS400 ||
		    host->timing == MMC_TIMING_MMC_HS200) {
			val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			val |= SDHCI_CTRL_TUNED_CLK;
			sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
		} else {
			val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			val &= ~SDHCI_CTRL_TUNED_CLK;
			sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
		}

		/* DLL is only required for HS400 */
		if (host->timing == MMC_TIMING_MMC_HS400 &&
		    !amd_host->dll_enabled)
			sdhci_acpi_amd_hs400_dll(host, true);
	}
}

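/*
 * Track whether tuning completed successfully so that amd_set_ios() only
 * re-enables the tuned clock when a valid tuning result exists.
 */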
static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	int err;
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);

	amd_host->tuned_clock = false;

	err = sdhci_execute_tuning(mmc, opcode);

	if (!err && !host->tuning_err)
		amd_host->tuned_clock = true;

	return err;
}

static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);

	if (mask & SDHCI_RESET_ALL) {
		amd_host->tuned_clock = false;
		sdhci_acpi_amd_hs400_dll(host, false);
	}

	sdhci_reset(host, mask);
}

static const struct sdhci_ops sdhci_acpi_ops_amd = {
	.set_clock = sdhci_set_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = amd_sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
	.ops = &sdhci_acpi_ops_amd,
};

static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
					  struct acpi_device *adev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct sdhci_host *host = c->host;

	sdhci_read_caps(host);
	if (host->caps1 & SDHCI_SUPPORT_DDR50)
		host->mmc->caps = MMC_CAP_1_8V_DDR;

	if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
	    (host->mmc->caps & MMC_CAP_1_8V_DDR))
		host->mmc->caps2 = MMC_CAP2_HS400_1_8V;

	/*
	 * There are two types of presets out in the wild:
	 * 1) Default/broken presets.
	 *    These presets have two sets of problems:
	 *    a) The clock divisor for SDR12, SDR25, and SDR50 is too small.
	 *       This results in clock frequencies that are 2x higher than
	 *       acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 =
	 *       100 MHz.
	 *    b) The HS200 and HS400 driver strengths don't match.
	 *       By default, the SDR104 preset register has a driver strength
	 *       of A, but the (internal) HS400 preset register has a driver
	 *       strength of B. As part of initializing HS400, HS200 tuning
	 *       needs to be performed. Having different driver strengths
	 *       between tuning and operation is wrong. It results in different
	 *       rise/fall times that lead to incorrect sampling.
	 * 2) Firmware with properly initialized presets.
	 *    These presets have proper clock divisors. i.e., SDR12 => 12 MHz,
	 *    SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and
	 *    HS400 preset driver strengths match.
	 *
	 * Enabling presets for HS400 doesn't work for the following reasons:
	 * 1) sdhci_set_ios has a hard coded list of timings that are used
	 *    to determine if presets should be enabled.
	 * 2) sdhci_get_preset_value is using a non-standard register to
	 *    read out HS400 presets. The AMD controller doesn't support this
	 *    non-standard register. In fact, it doesn't expose the HS400
	 *    preset register anywhere in the SDHCI memory map. This results
	 *    in reading a garbage value and using the wrong presets.
	 *
	 *    Since HS400 and HS200 presets must be identical, we could
	 *    instead use the SDR104 preset register.
	 *
	 * If the above issues are resolved we could remove this quirk for
	 * firmware that has valid presets (i.e., SDR12 <= 12 MHz).
	 */
	host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;

	host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
	host->mmc_host_ops.set_ios = amd_set_ios;
	host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
	return 0;
}

static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
	.chip = &sdhci_acpi_chip_amd,
	.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
		  SDHCI_QUIRK_32BIT_DMA_SIZE |
		  SDHCI_QUIRK_32BIT_ADMA_SIZE,
	.quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
	.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
	.priv_size = sizeof(struct amd_sdhci_host),
};

struct sdhci_acpi_uid_slot {
	const char *hid;
	const char *uid;
	const struct sdhci_acpi_slot *slot;
};

static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
	{ "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
	{ "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
	{ "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
	{ "80860F14", "1", &sdhci_acpi_slot_int_emmc },
	{ "80860F14", "2", &sdhci_acpi_slot_int_sdio },
	{ "80860F14", "3", &sdhci_acpi_slot_int_sd },
	{ "80860F16", NULL, &sdhci_acpi_slot_int_sd },
	{ "INT33BB", "2", &sdhci_acpi_slot_int_sdio },
	{ "INT33BB", "3", &sdhci_acpi_slot_int_sd },
	{ "INT33C6", NULL, &sdhci_acpi_slot_int_sdio },
	{ "INT3436", NULL, &sdhci_acpi_slot_int_sdio },
	{ "INT344D", NULL, &sdhci_acpi_slot_int_sdio },
	{ "PNP0FFF", "3", &sdhci_acpi_slot_int_sd },
	{ "PNP0D40" },
	{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
	{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
	{ "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
	{ "AMDI0041", NULL, &sdhci_acpi_slot_amd_emmc },
	{ },
};

static const struct acpi_device_id sdhci_acpi_ids[] = {
	{ "80865ACA" },
	{ "80865ACC" },
	{ "80865AD0" },
	{ "80860F14" },
	{ "80860F16" },
	{ "INT33BB" },
	{ "INT33C6" },
	{ "INT3436" },
	{ "INT344D" },
	{ "PNP0D40" },
	{ "QCOM8051" },
	{ "QCOM8052" },
	{ "AMDI0040" },
	{ "AMDI0041" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);

static const struct dmi_system_id sdhci_acpi_quirks[] = {
	{
		/*
		 * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
		 * the SHC1 ACPI device; this bug causes it to reprogram the
		 * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
		 * card is (runtime) suspended + resumed. DLDO3 is used for
		 * the LCD and setting it to 1.8V causes the LCD to go black.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
		},
		.driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP,
	},
	{
		/*
		 * The Acer Aspire Switch 10 (SW5-012) microSD slot always
		 * reports the card being write-protected even though microSD
		 * cards do not have a write-protect switch at all.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
		},
		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
	},
	{
		/*
		 * The Toshiba WT8-B's microSD slot always reports the card
		 * being write-protected.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"),
		},
		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
	},
	{} /* Terminating entry */
};

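/*
 * Look up the slot data for this ACPI device; a NULL uid entry in the
 * table matches any _UID for that _HID.
 */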
static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
{
	const struct sdhci_acpi_uid_slot *u;

	for (u = sdhci_acpi_uids; u->hid; u++) {
		if (acpi_dev_hid_uid_match(adev, u->hid, u->uid))
			return u->slot;
	}
	return NULL;
}

static int sdhci_acpi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct sdhci_acpi_slot *slot;
	struct acpi_device *device, *child;
	const struct dmi_system_id *id;
	struct sdhci_acpi_host *c;
	struct sdhci_host *host;
	struct resource *iomem;
	resource_size_t len;
	size_t priv_size;
	int quirks = 0;
	int err;

	device = ACPI_COMPANION(dev);
	if (!device)
		return -ENODEV;

	id = dmi_first_match(sdhci_acpi_quirks);
	if (id)
		quirks = (long)id->driver_data;

	slot = sdhci_acpi_get_slot(device);

	/* Power on the SDHCI controller and its children */
	acpi_device_fix_up_power(device);
	if (!sdhci_acpi_no_fixup_child_power(device)) {
		list_for_each_entry(child, &device->children, node)
			if (child->status.present && child->status.enabled)
				acpi_device_fix_up_power(child);
	}

	if (sdhci_acpi_byt_defer(dev))
		return -EPROBE_DEFER;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -ENOMEM;

	len = resource_size(iomem);
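	/* The standard SDHCI register map is 256 bytes */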
	if (len < 0x100)
		dev_err(dev, "Invalid iomem size!\n");

	if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
		return -ENOMEM;

	priv_size = slot ? slot->priv_size : 0;
	host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
	if (IS_ERR(host))
		return PTR_ERR(host);

	c = sdhci_priv(host);
	c->host = host;
	c->slot = slot;
	c->pdev = pdev;
	c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);

	platform_set_drvdata(pdev, c);

	host->hw_name = "ACPI";
	host->ops = &sdhci_acpi_ops_dflt;
	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		err = -EINVAL;
		goto err_free;
	}

	host->ioaddr = devm_ioremap(dev, iomem->start,
				    resource_size(iomem));
	if (host->ioaddr == NULL) {
		err = -ENOMEM;
		goto err_free;
	}

	if (c->slot) {
		if (c->slot->probe_slot) {
			err = c->slot->probe_slot(pdev, device);
			if (err)
				goto err_free;
		}
		if (c->slot->chip) {
			host->ops = c->slot->chip->ops;
			host->quirks |= c->slot->chip->quirks;
			host->quirks2 |= c->slot->chip->quirks2;
			host->mmc->caps |= c->slot->chip->caps;
			host->mmc->caps2 |= c->slot->chip->caps2;
			host->mmc->pm_caps |= c->slot->chip->pm_caps;
		}
		host->quirks |= c->slot->quirks;
		host->quirks2 |= c->slot->quirks2;
		host->mmc->caps |= c->slot->caps;
		host->mmc->caps2 |= c->slot->caps2;
		host->mmc->pm_caps |= c->slot->pm_caps;
	}

	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;

	if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
		bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);

		err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
		if (err) {
			if (err == -EPROBE_DEFER)
				goto err_free;
			dev_warn(dev, "failed to setup card detect gpio\n");
			c->use_runtime_pm = false;
		}

		if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
			c->reset_signal_volt_on_suspend = true;

		if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT)
			host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
	}

	err = sdhci_setup_host(host);
	if (err)
		goto err_free;

	if (c->slot && c->slot->setup_host) {
		err = c->slot->setup_host(pdev);
		if (err)
			goto err_cleanup;
	}

	err = __sdhci_add_host(host);
	if (err)
		goto err_cleanup;

	if (c->use_runtime_pm) {
		pm_runtime_set_active(dev);
		pm_suspend_ignore_children(dev, 1);
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_enable(dev);
	}

	device_enable_async_suspend(dev);

	return 0;

err_cleanup:
	sdhci_cleanup_host(c->host);
err_free:
	if (c->slot && c->slot->free_slot)
		c->slot->free_slot(pdev);

	sdhci_free_host(c->host);
	return err;
}

static int sdhci_acpi_remove(struct platform_device *pdev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int dead;

	if (c->use_runtime_pm) {
		pm_runtime_get_sync(dev);
		pm_runtime_disable(dev);
		pm_runtime_put_noidle(dev);
	}

	if (c->slot && c->slot->remove_slot)
		c->slot->remove_slot(pdev);

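	/* An interrupt status of all ones means the controller has gone away */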
	dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
	sdhci_remove_host(c->host, dead);

	if (c->slot && c->slot->free_slot)
		c->slot->free_slot(pdev);

	sdhci_free_host(c->host);

	return 0;
}

static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
	struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct sdhci_host *host = c->host;

	if (c->is_intel && c->reset_signal_volt_on_suspend &&
	    host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) {
		struct intel_host *intel_host = sdhci_acpi_priv(c);
		unsigned int fn = INTEL_DSM_V33_SWITCH;
		u32 result = 0;

		intel_dsm(intel_host, dev, fn, &result);
	}
}

#ifdef CONFIG_PM_SLEEP

static int sdhci_acpi_suspend(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct sdhci_host *host = c->host;
	int ret;

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	ret = sdhci_suspend_host(host);
	if (ret)
		return ret;

	sdhci_acpi_reset_signal_voltage_if_needed(dev);
	return 0;
}

static int sdhci_acpi_resume(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);

	sdhci_acpi_byt_setting(&c->pdev->dev);

	return sdhci_resume_host(c->host);
}

#endif

#ifdef CONFIG_PM

static int sdhci_acpi_runtime_suspend(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct sdhci_host *host = c->host;
	int ret;

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	ret = sdhci_runtime_suspend_host(host);
	if (ret)
		return ret;

	sdhci_acpi_reset_signal_voltage_if_needed(dev);
	return 0;
}

static int sdhci_acpi_runtime_resume(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);

	sdhci_acpi_byt_setting(&c->pdev->dev);

	return sdhci_runtime_resume_host(c->host, 0);
}

#endif

static const struct dev_pm_ops sdhci_acpi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
	SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
			   sdhci_acpi_runtime_resume, NULL)
};

static struct platform_driver sdhci_acpi_driver = {
	.driver = {
		.name = "sdhci-acpi",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.acpi_match_table = sdhci_acpi_ids,
		.pm = &sdhci_acpi_pm_ops,
	},
	.probe = sdhci_acpi_probe,
	.remove = sdhci_acpi_remove,
};

module_platform_driver(sdhci_acpi_driver);

MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL v2");