// SPDX-License-Identifier: GPL-2.0-only
/*
 * Secure Digital Host Controller Interface ACPI driver.
 *
 * Copyright (c) 2012, Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/compiler.h>
#include <linux/stddef.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/dmi.h>

#include <linux/mmc/host.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>

#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#include <linux/pci.h>
#endif

#include "sdhci.h"

enum {
	SDHCI_ACPI_SD_CD		= BIT(0),
	SDHCI_ACPI_RUNTIME_PM		= BIT(1),
	SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL	= BIT(2),
};

struct sdhci_acpi_chip {
	const struct sdhci_ops *ops;
	unsigned int quirks;
	unsigned int quirks2;
	unsigned long caps;
	unsigned int caps2;
	mmc_pm_flag_t pm_caps;
};

struct sdhci_acpi_slot {
	const struct sdhci_acpi_chip *chip;
	unsigned int quirks;
	unsigned int quirks2;
	unsigned long caps;
	unsigned int caps2;
	mmc_pm_flag_t pm_caps;
	unsigned int flags;
	size_t priv_size;
	int (*probe_slot)(struct platform_device *, struct acpi_device *);
	int (*remove_slot)(struct platform_device *);
	int (*free_slot)(struct platform_device *pdev);
	int (*setup_host)(struct platform_device *pdev);
};

struct sdhci_acpi_host {
	struct sdhci_host *host;
	const struct sdhci_acpi_slot *slot;
	struct platform_device *pdev;
	bool use_runtime_pm;
	bool is_intel;
	bool reset_signal_volt_on_suspend;
	unsigned long private[] ____cacheline_aligned;
};

enum {
	DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP	= BIT(0),
	DMI_QUIRK_SD_NO_WRITE_PROTECT		= BIT(1),
};

static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
{
	return (void *)c->private;
}

static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
	return c->slot && (c->slot->flags & flag);
}

#define INTEL_DSM_HS_CAPS_SDR25		BIT(0)
#define INTEL_DSM_HS_CAPS_DDR50		BIT(1)
#define INTEL_DSM_HS_CAPS_SDR50		BIT(2)
#define INTEL_DSM_HS_CAPS_SDR104	BIT(3)

enum {
	INTEL_DSM_FNS		= 0,
	INTEL_DSM_V18_SWITCH	= 3,
	INTEL_DSM_V33_SWITCH	= 4,
	INTEL_DSM_HS_CAPS	= 8,
};

struct intel_host {
	u32 dsm_fns;
	u32 hs_caps;
};

static const guid_t intel_dsm_guid =
	GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
		  0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);

static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type == ACPI_TYPE_INTEGER) {
		*result = obj->integer.value;
	} else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
		size_t len = min_t(size_t, obj->buffer.length, 4);

		*result = 0;
		memcpy(result, obj->buffer.pointer, len);
	} else {
		dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
			__func__, fn, obj->type, obj->buffer.length);
		err = -EINVAL;
	}

	ACPI_FREE(obj);

	return err;
}
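
/*
 * Only invoke a _DSM function after function 0 (INTEL_DSM_FNS) has reported it
 * as available in the dsm_fns bitmask.
 */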
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}

static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
			   struct mmc_host *mmc)
{
	int err;

	intel_host->hs_caps = ~0;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	if (err) {
		pr_debug("%s: DSM not supported, error %d\n",
			 mmc_hostname(mmc), err);
		return;
	}

	pr_debug("%s: DSM function mask %#x\n",
		 mmc_hostname(mmc), intel_host->dsm_fns);

	intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}

static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct intel_host *intel_host = sdhci_acpi_priv(c);
	unsigned int fn;
	u32 result = 0;
	int err;

	err = sdhci_start_signal_voltage_switch(mmc, ios);
	if (err)
		return err;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		fn = INTEL_DSM_V33_SWITCH;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		fn = INTEL_DSM_V18_SWITCH;
		break;
	default:
		return 0;
	}

	err = intel_dsm(intel_host, dev, fn, &result);
	pr_debug("%s: %s DSM fn %u error %d result %u\n",
		 mmc_hostname(mmc), __func__, fn, err, result);

	return 0;
}

static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
	u8 reg;

	reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
	reg |= 0x10;
	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
	/* For eMMC, minimum is 1us but give it 9us for good measure */
	udelay(9);
	reg &= ~0x10;
	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
	/* For eMMC, minimum is 200us but give it 300us for good measure */
	usleep_range(300, 1000);
}

static const struct sdhci_ops sdhci_acpi_ops_dflt = {
	.set_clock = sdhci_set_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_ops sdhci_acpi_ops_int = {
	.set_clock = sdhci_set_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
	.hw_reset = sdhci_acpi_int_hw_reset,
};

static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
	.ops = &sdhci_acpi_ops_int,
};

#ifdef CONFIG_X86

static bool sdhci_acpi_byt(void)
{
	static const struct x86_cpu_id byt[] = {
		X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
		{}
	};

	return x86_match_cpu(byt);
}

static bool sdhci_acpi_cht(void)
{
	static const struct x86_cpu_id cht[] = {
		X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
		{}
	};

	return x86_match_cpu(cht);
}

#define BYT_IOSF_SCCEP			0x63
#define BYT_IOSF_OCP_NETCTRL0		0x1078
#define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)
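
/*
 * On Bay Trail, zero the OCP timeout base field in the SCC endpoint's
 * NETCTRL0 register via the IOSF sideband mailbox (a no-op when the field is
 * already zero).
 */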
static void sdhci_acpi_byt_setting(struct device *dev)
{
	u32 val = 0;

	if (!sdhci_acpi_byt())
		return;

	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
			  &val)) {
		dev_err(dev, "%s read error\n", __func__);
		return;
	}

	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
		return;

	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;

	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
			   val)) {
		dev_err(dev, "%s write error\n", __func__);
		return;
	}

	dev_dbg(dev, "%s completed\n", __func__);
}

static bool sdhci_acpi_byt_defer(struct device *dev)
{
	if (!sdhci_acpi_byt())
		return false;

	if (!iosf_mbi_available())
		return true;

	sdhci_acpi_byt_setting(dev);

	return false;
}

static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
				    unsigned int slot, unsigned int parent_slot)
{
	struct pci_dev *dev, *parent, *from = NULL;

	while (1) {
		dev = pci_get_device(vendor, device, from);
		pci_dev_put(from);
		if (!dev)
			break;
		parent = pci_upstream_bridge(dev);
		if (ACPI_COMPANION(&dev->dev) && PCI_SLOT(dev->devfn) == slot &&
		    parent && PCI_SLOT(parent->devfn) == parent_slot &&
		    !pci_upstream_bridge(parent)) {
			pci_dev_put(dev);
			return true;
		}
		from = dev;
	}

	return false;
}

/*
 * GPDwin uses PCI wifi which conflicts with SDIO's use of
 * acpi_device_fix_up_power() on child device nodes. Identifying GPDwin is
 * problematic, but since SDIO is only used for wifi, the presence of the PCI
 * wifi card in the expected slot with an ACPI companion node is used to
 * indicate that acpi_device_fix_up_power() should be avoided.
 */
static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
	return sdhci_acpi_cht() &&
	       acpi_dev_hid_uid_match(adev, "80860F14", "2") &&
	       sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
}

#else

static inline void sdhci_acpi_byt_setting(struct device *dev)
{
}

static inline bool sdhci_acpi_byt_defer(struct device *dev)
{
	return false;
}

static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
	return false;
}

#endif
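
/*
 * Card detect helper: trust the CD GPIO only when it reports that no card is
 * present; otherwise read the card state from the controller's present state
 * register.
 */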
static int bxt_get_cd(struct mmc_host *mmc)
{
	int gpio_cd = mmc_gpio_get_cd(mmc);
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret = 0;

	if (!gpio_cd)
		return 0;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		goto out;

	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
out:
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}

static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct intel_host *intel_host = sdhci_acpi_priv(c);
	struct sdhci_host *host = c->host;

	if (acpi_dev_hid_uid_match(adev, "80860F14", "1") &&
	    sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
	    sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
		host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */

	if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL))
		host->mmc_host_ops.get_cd = bxt_get_cd;

	intel_dsm_init(intel_host, &pdev->dev, host->mmc);

	host->mmc_host_ops.start_signal_voltage_switch =
					intel_start_signal_voltage_switch;

	c->is_intel = true;

	return 0;
}

static int intel_setup_host(struct platform_device *pdev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct intel_host *intel_host = sdhci_acpi_priv(c);

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
		c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
		c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
		c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;

	if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
		c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;

	return 0;
}

static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
	.chip = &sdhci_acpi_chip_int,
	.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
		MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
		MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY,
	.flags = SDHCI_ACPI_RUNTIME_PM,
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
		   SDHCI_QUIRK2_STOP_WITH_TC |
		   SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
	.probe_slot = intel_probe_slot,
	.setup_host = intel_setup_host,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_NO_LED |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
	.caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
		MMC_CAP_WAIT_WHILE_BUSY,
	.flags = SDHCI_ACPI_RUNTIME_PM,
	.pm_caps = MMC_PM_KEEP_POWER,
	.probe_slot = intel_probe_slot,
	.setup_host = intel_setup_host,
	.priv_size = sizeof(struct intel_host),
};

static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
	.flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL |
		 SDHCI_ACPI_RUNTIME_PM,
	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
		  SDHCI_QUIRK_NO_LED,
	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
		   SDHCI_QUIRK2_STOP_WITH_TC,
	.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
	.probe_slot = intel_probe_slot,
	.setup_host = intel_setup_host,
	.priv_size = sizeof(struct intel_host),
};
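
/*
 * QCOM8051 hosts raise a vendor-specific power-control interrupt; the handler
 * below acknowledges the request via the vendor clear/control registers.
 */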
#define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG	0x1a8
#define VENDOR_SPECIFIC_PWRCTL_CTL_REG		0x1ac
static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
{
	struct sdhci_host *host = ptr;

	sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG);
	sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG);

	return IRQ_HANDLED;
}

static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct sdhci_host *host = c->host;
	int *irq = sdhci_acpi_priv(c);

	*irq = -EINVAL;

	if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
		return 0;

	*irq = platform_get_irq(pdev, 1);
	if (*irq < 0)
		return 0;

	return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler,
				    IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
				    "sdhci_qcom", host);
}

static int qcom_free_slot(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct sdhci_host *host = c->host;
	struct acpi_device *adev;
	int *irq = sdhci_acpi_priv(c);

	adev = ACPI_COMPANION(dev);
	if (!adev)
		return -ENODEV;

	if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
		return 0;

	if (*irq < 0)
		return 0;

	free_irq(*irq, host);
	return 0;
}

static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
	.quirks2 = SDHCI_QUIRK2_NO_1_8_V,
	.caps = MMC_CAP_NONREMOVABLE,
	.priv_size = sizeof(int),
	.probe_slot = qcom_probe_slot,
	.free_slot = qcom_free_slot,
};

static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
	.caps = MMC_CAP_NONREMOVABLE,
};
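
/*
 * Per-host AMD state: whether tuning has completed and whether the HS400 DLL
 * is currently enabled.
 */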
struct amd_sdhci_host {
	bool	tuned_clock;
	bool	dll_enabled;
};

/* AMD sdhci reset dll register. */
#define SDHCI_AMD_RESET_DLL_REGISTER	0x908

static int amd_select_drive_strength(struct mmc_card *card,
				     unsigned int max_dtr, int host_drv,
				     int card_drv, int *host_driver_strength)
{
	struct sdhci_host *host = mmc_priv(card->host);
	u16 preset, preset_driver_strength;

	/*
	 * This method is only called by mmc_select_hs200 so we only need to
	 * read from the HS200 (SDR104) preset register.
	 *
	 * Firmware that has "invalid/default" presets returns a driver
	 * strength of A. This matches the previously hard coded value.
	 */
	preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
	preset_driver_strength = FIELD_GET(SDHCI_PRESET_DRV_MASK, preset);

	/*
	 * We want the controller driver strength to match the card's driver
	 * strength so they have similar rise/fall times.
	 *
	 * The controller driver strength set by this method is sticky for all
	 * timings after this method is called. This unfortunately means that
	 * while HS400 tuning is in progress we end up with mismatched driver
	 * strengths between the controller and the card. HS400 tuning requires
	 * switching from HS400->DDR52->HS->HS200->HS400, so the driver
	 * strength mismatch happens while in DDR52 and HS modes. This has not
	 * been observed to cause problems. Enabling presets would fix this
	 * issue.
	 */
	*host_driver_strength = preset_driver_strength;

	/*
	 * The resulting card driver strength is only set when switching the
	 * card's timing to HS200 or HS400. The card will use the default
	 * driver strength (B) for any other mode.
	 */
	return preset_driver_strength;
}

static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
{
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);

	/* AMD Platform requires dll setting */
	sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
	usleep_range(10, 20);
	if (enable)
		sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);

	amd_host->dll_enabled = enable;
}

/*
 * The initialization sequence for HS400 is:
 * HS->HS200->Perform Tuning->HS->HS400
 *
 * The re-tuning sequence is:
 * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400
 *
 * The AMD eMMC Controller can only use the tuned clock while in HS200 and
 * HS400 mode. If we switch to a different mode, we need to disable the tuned
 * clock. If we have previously performed tuning and switch back to HS200 or
 * HS400, we can re-enable the tuned clock.
 */
static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
	unsigned int old_timing = host->timing;
	u16 val;

	sdhci_set_ios(mmc, ios);

	if (old_timing != host->timing && amd_host->tuned_clock) {
		if (host->timing == MMC_TIMING_MMC_HS400 ||
		    host->timing == MMC_TIMING_MMC_HS200) {
			val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			val |= SDHCI_CTRL_TUNED_CLK;
			sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
		} else {
			val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			val &= ~SDHCI_CTRL_TUNED_CLK;
			sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
		}

		/* DLL is only required for HS400 */
		if (host->timing == MMC_TIMING_MMC_HS400 &&
		    !amd_host->dll_enabled)
			sdhci_acpi_amd_hs400_dll(host, true);
	}
}
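
/*
 * Track whether tuning completed successfully so amd_set_ios() knows when the
 * tuned clock may be re-enabled.
 */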
static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	int err;
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);

	amd_host->tuned_clock = false;

	err = sdhci_execute_tuning(mmc, opcode);

	if (!err && !host->tuning_err)
		amd_host->tuned_clock = true;

	return err;
}

static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);

	if (mask & SDHCI_RESET_ALL) {
		amd_host->tuned_clock = false;
		sdhci_acpi_amd_hs400_dll(host, false);
	}

	sdhci_reset(host, mask);
}

static const struct sdhci_ops sdhci_acpi_ops_amd = {
	.set_clock = sdhci_set_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = amd_sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
	.ops = &sdhci_acpi_ops_amd,
};

static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
					  struct acpi_device *adev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct sdhci_host *host = c->host;

	sdhci_read_caps(host);
	if (host->caps1 & SDHCI_SUPPORT_DDR50)
		host->mmc->caps = MMC_CAP_1_8V_DDR;

	if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
	    (host->mmc->caps & MMC_CAP_1_8V_DDR))
		host->mmc->caps2 = MMC_CAP2_HS400_1_8V;

	/*
	 * There are two types of presets out in the wild:
	 * 1) Default/broken presets.
	 *    These presets have two sets of problems:
	 *    a) The clock divisor for SDR12, SDR25, and SDR50 is too small.
	 *       This results in clock frequencies that are 2x higher than
	 *       acceptable, i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 =
	 *       100 MHz.
	 *    b) The HS200 and HS400 driver strengths don't match.
	 *       By default, the SDR104 preset register has a driver strength
	 *       of A, but the (internal) HS400 preset register has a driver
	 *       strength of B. As part of initializing HS400, HS200 tuning
	 *       needs to be performed. Having different driver strengths
	 *       between tuning and operation is wrong. It results in different
	 *       rise/fall times that lead to incorrect sampling.
	 * 2) Firmware with properly initialized presets.
	 *    These presets have proper clock divisors, i.e., SDR12 => 12 MHz,
	 *    SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and
	 *    HS400 preset driver strengths match.
	 *
	 * Enabling presets for HS400 doesn't work for the following reasons:
	 * 1) sdhci_set_ios has a hard coded list of timings that are used
	 *    to determine if presets should be enabled.
	 * 2) sdhci_get_preset_value is using a non-standard register to
	 *    read out HS400 presets. The AMD controller doesn't support this
	 *    non-standard register. In fact, it doesn't expose the HS400
	 *    preset register anywhere in the SDHCI memory map. This results
	 *    in reading a garbage value and using the wrong presets.
	 *
	 * Since HS400 and HS200 presets must be identical, we could
	 * instead use the SDR104 preset register.
	 *
	 * If the above issues are resolved we could remove this quirk for
	 * firmware that has valid presets (i.e., SDR12 <= 12 MHz).
	 */
	host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;

	host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
	host->mmc_host_ops.set_ios = amd_set_ios;
	host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
	return 0;
}

static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
	.chip = &sdhci_acpi_chip_amd,
	.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
		  SDHCI_QUIRK_32BIT_DMA_SIZE |
		  SDHCI_QUIRK_32BIT_ADMA_SIZE,
	.quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
	.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
	.priv_size = sizeof(struct amd_sdhci_host),
};
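
/*
 * Maps an ACPI _HID (and optional _UID) to the slot data used to configure
 * the host.
 */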
struct sdhci_acpi_uid_slot {
	const char *hid;
	const char *uid;
	const struct sdhci_acpi_slot *slot;
};

static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
	{ "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
	{ "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
	{ "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
	{ "80860F14", "1", &sdhci_acpi_slot_int_emmc },
	{ "80860F14", "2", &sdhci_acpi_slot_int_sdio },
	{ "80860F14", "3", &sdhci_acpi_slot_int_sd },
	{ "80860F16", NULL, &sdhci_acpi_slot_int_sd },
	{ "INT33BB", "2", &sdhci_acpi_slot_int_sdio },
	{ "INT33BB", "3", &sdhci_acpi_slot_int_sd },
	{ "INT33C6", NULL, &sdhci_acpi_slot_int_sdio },
	{ "INT3436", NULL, &sdhci_acpi_slot_int_sdio },
	{ "INT344D", NULL, &sdhci_acpi_slot_int_sdio },
	{ "PNP0FFF", "3", &sdhci_acpi_slot_int_sd },
	{ "PNP0D40" },
	{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
	{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
	{ "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
	{ "AMDI0041", NULL, &sdhci_acpi_slot_amd_emmc },
	{ },
};

static const struct acpi_device_id sdhci_acpi_ids[] = {
	{ "80865ACA" },
	{ "80865ACC" },
	{ "80865AD0" },
	{ "80860F14" },
	{ "80860F16" },
	{ "INT33BB" },
	{ "INT33C6" },
	{ "INT3436" },
	{ "INT344D" },
	{ "PNP0D40" },
	{ "QCOM8051" },
	{ "QCOM8052" },
	{ "AMDI0040" },
	{ "AMDI0041" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);

static const struct dmi_system_id sdhci_acpi_quirks[] = {
	{
		/*
		 * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
		 * the SHC1 ACPI device; this bug causes it to reprogram the
		 * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
		 * card is (runtime) suspended + resumed. DLDO3 is used for
		 * the LCD and setting it to 1.8V causes the LCD to go black.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
		},
		.driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP,
	},
	{
		/*
		 * The Acer Aspire Switch 10 (SW5-012) microSD slot always
		 * reports the card being write-protected even though microSD
		 * cards do not have a write-protect switch at all.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
		},
		.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
	},
	{} /* Terminating entry */
};

static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
{
	const struct sdhci_acpi_uid_slot *u;

	for (u = sdhci_acpi_uids; u->hid; u++) {
		if (acpi_dev_hid_uid_match(adev, u->hid, u->uid))
			return u->slot;
	}
	return NULL;
}

static int sdhci_acpi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct sdhci_acpi_slot *slot;
	struct acpi_device *device, *child;
	const struct dmi_system_id *id;
	struct sdhci_acpi_host *c;
	struct sdhci_host *host;
	struct resource *iomem;
	resource_size_t len;
	size_t priv_size;
	int quirks = 0;
	int err;

	device = ACPI_COMPANION(dev);
	if (!device)
		return -ENODEV;

	id = dmi_first_match(sdhci_acpi_quirks);
	if (id)
		quirks = (long)id->driver_data;

	slot = sdhci_acpi_get_slot(device);

	/* Power on the SDHCI controller and its children */
	acpi_device_fix_up_power(device);
	if (!sdhci_acpi_no_fixup_child_power(device)) {
		list_for_each_entry(child, &device->children, node)
			if (child->status.present && child->status.enabled)
				acpi_device_fix_up_power(child);
	}
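
	/*
	 * On Bay Trail, defer probing until the IOSF sideband interface is
	 * available so that sdhci_acpi_byt_setting() can be applied.
	 */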
	if (sdhci_acpi_byt_defer(dev))
		return -EPROBE_DEFER;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -ENOMEM;

	len = resource_size(iomem);
	if (len < 0x100)
		dev_err(dev, "Invalid iomem size!\n");

	if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
		return -ENOMEM;

	priv_size = slot ? slot->priv_size : 0;
	host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
	if (IS_ERR(host))
		return PTR_ERR(host);

	c = sdhci_priv(host);
	c->host = host;
	c->slot = slot;
	c->pdev = pdev;
	c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);

	platform_set_drvdata(pdev, c);

	host->hw_name = "ACPI";
	host->ops = &sdhci_acpi_ops_dflt;
	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		err = -EINVAL;
		goto err_free;
	}

	host->ioaddr = devm_ioremap(dev, iomem->start,
				    resource_size(iomem));
	if (host->ioaddr == NULL) {
		err = -ENOMEM;
		goto err_free;
	}

	if (c->slot) {
		if (c->slot->probe_slot) {
			err = c->slot->probe_slot(pdev, device);
			if (err)
				goto err_free;
		}
		if (c->slot->chip) {
			host->ops = c->slot->chip->ops;
			host->quirks |= c->slot->chip->quirks;
			host->quirks2 |= c->slot->chip->quirks2;
			host->mmc->caps |= c->slot->chip->caps;
			host->mmc->caps2 |= c->slot->chip->caps2;
			host->mmc->pm_caps |= c->slot->chip->pm_caps;
		}
		host->quirks |= c->slot->quirks;
		host->quirks2 |= c->slot->quirks2;
		host->mmc->caps |= c->slot->caps;
		host->mmc->caps2 |= c->slot->caps2;
		host->mmc->pm_caps |= c->slot->pm_caps;
	}

	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;

	if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
		bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);

		err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
		if (err) {
			if (err == -EPROBE_DEFER)
				goto err_free;
			dev_warn(dev, "failed to setup card detect gpio\n");
			c->use_runtime_pm = false;
		}

		if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
			c->reset_signal_volt_on_suspend = true;

		if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT)
			host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
	}

	err = sdhci_setup_host(host);
	if (err)
		goto err_free;

	if (c->slot && c->slot->setup_host) {
		err = c->slot->setup_host(pdev);
		if (err)
			goto err_cleanup;
	}

	err = __sdhci_add_host(host);
	if (err)
		goto err_cleanup;

	if (c->use_runtime_pm) {
		pm_runtime_set_active(dev);
		pm_suspend_ignore_children(dev, 1);
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_enable(dev);
	}

	device_enable_async_suspend(dev);

	return 0;

err_cleanup:
	sdhci_cleanup_host(c->host);
err_free:
	if (c->slot && c->slot->free_slot)
		c->slot->free_slot(pdev);

	sdhci_free_host(c->host);
	return err;
}

static int sdhci_acpi_remove(struct platform_device *pdev)
{
	struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int dead;

	if (c->use_runtime_pm) {
		pm_runtime_get_sync(dev);
		pm_runtime_disable(dev);
		pm_runtime_put_noidle(dev);
	}

	if (c->slot && c->slot->remove_slot)
		c->slot->remove_slot(pdev);

	dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
	sdhci_remove_host(c->host, dead);

	if (c->slot && c->slot->free_slot)
		c->slot->free_slot(pdev);

	sdhci_free_host(c->host);

	return 0;
}
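
/*
 * On Intel hosts with DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP, switch the
 * signal voltage back to 3.3V before suspending so a buggy ACPI _PS0 method
 * does not reprogram the wrong regulator on resume (see the Lenovo Miix 320
 * DMI quirk above).
 */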
static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
	struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct sdhci_host *host = c->host;

	if (c->is_intel && c->reset_signal_volt_on_suspend &&
	    host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) {
		struct intel_host *intel_host = sdhci_acpi_priv(c);
		unsigned int fn = INTEL_DSM_V33_SWITCH;
		u32 result = 0;

		intel_dsm(intel_host, dev, fn, &result);
	}
}

#ifdef CONFIG_PM_SLEEP

static int sdhci_acpi_suspend(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct sdhci_host *host = c->host;
	int ret;

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	ret = sdhci_suspend_host(host);
	if (ret)
		return ret;

	sdhci_acpi_reset_signal_voltage_if_needed(dev);
	return 0;
}

static int sdhci_acpi_resume(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);

	sdhci_acpi_byt_setting(&c->pdev->dev);

	return sdhci_resume_host(c->host);
}

#endif

#ifdef CONFIG_PM

static int sdhci_acpi_runtime_suspend(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct sdhci_host *host = c->host;
	int ret;

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	ret = sdhci_runtime_suspend_host(host);
	if (ret)
		return ret;

	sdhci_acpi_reset_signal_voltage_if_needed(dev);
	return 0;
}

static int sdhci_acpi_runtime_resume(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);

	sdhci_acpi_byt_setting(&c->pdev->dev);

	return sdhci_runtime_resume_host(c->host, 0);
}

#endif

static const struct dev_pm_ops sdhci_acpi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
	SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
			   sdhci_acpi_runtime_resume, NULL)
};

static struct platform_driver sdhci_acpi_driver = {
	.driver = {
		.name = "sdhci-acpi",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.acpi_match_table = sdhci_acpi_ids,
		.pm = &sdhci_acpi_pm_ops,
	},
	.probe = sdhci_acpi_probe,
	.remove = sdhci_acpi_remove,
};

module_platform_driver(sdhci_acpi_driver);

MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL v2");