// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/suspend.h>
#include <linux/delay.h>

#include "internal.h"

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

/* Pack a descriptor pointer into acpi_device_id->driver_data. */
#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

/* Number of 32-bit private registers saved/restored across power cycles. */
#define LPSS_PRV_REG_COUNT		9

/* LPSS Flags */
#define LPSS_CLK		BIT(0)
#define LPSS_CLK_GATE		BIT(1)
#define LPSS_CLK_DIVIDER	BIT(2)
#define LPSS_LTR		BIT(3)
#define LPSS_SAVE_CTX		BIT(4)
/*
 * For some devices the DSDT AML code for another device turns off the device
 * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff)
 * as ctx register values.
 * Luckily these devices always use the same ctx register values, so we can
 * work around this by saving the ctx registers once on activation.
 */
#define LPSS_SAVE_CTX_ONCE	BIT(5)
#define LPSS_NO_D3_DELAY	BIT(6)

struct lpss_private_data;

/*
 * Static, per-HID description of an LPSS device: feature flags, private
 * register space layout, extra device properties and an optional setup
 * hook run at enumeration time.
 */
struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
	bool resume_from_noirq;
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

/* Runtime state attached to the ACPI device as its driver_data. */
struct lpss_private_data {
	struct acpi_device *adev;
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover
 * it can be powered off automatically whenever the last LPSS device goes down.
 * In case of no power any access to the DMA controller will hang the system.
 * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
 * well as on ASuS T100TA transformer.
 *
 * This quirk overrides power state of entire LPSS island to keep DMA powered
 * on whenever we have at least one other device in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR		0xF4
#define LPSS_UART_CPR_AFCE	BIT(4)

/*
 * Mask TX interrupts; if the UART has no auto flow control (CPR.AFCE clear),
 * also force the RTS override bit in the GENERAL private register.
 */
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

/* Take the device's function and APB domains out of reset. */
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE	0x6c

static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	const char *uid_str = acpi_device_uid(pdata->adev);
	acpi_handle handle = pdata->adev->handle;
	unsigned long long shared_host = 0;
	acpi_status status;
	long uid = 0;

	/* Expected to always be true, but better safe than sorry */
	if (uid_str)
		uid = simple_strtol(uid_str, NULL, 10);

	/* Detect I2C bus shared with PUNIT and ignore its d3 status */
	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
	if (ACPI_SUCCESS(status) && shared_host && uid)
		pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));

	lpss_deassert_reset(pdata);

	/*
	 * NOTE(review): a nonzero value at prv_offset is taken to mean the
	 * clock is a fixed 133 MHz one — presumably firmware programmed the
	 * private clock register; confirm against platform documentation.
	 */
	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	/* Disable the I2C host controller until its driver takes over. */
	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(bsw_pwm_lookup,
		      ARRAY_SIZE(bsw_pwm_lookup));
}

/* Lynxpoint: full clock tree (gate + divider), LTR and context save. */
static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

/* Extra properties passed to the DesignWare 8250 UART driver. */
static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = bsw_pwm_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};

/* CPU models whose LPSS island is managed by this driver's quirks. */
static const struct x86_cpu_id lpss_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT33C7", },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
	{ "INT33B2", },
	{ "INT33FC", },

	/* Braswell LPSS devices */
	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT3437", },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

/* Resource-walk filter: keep only memory resources. */
static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;

	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device.
 */
static struct platform_device *lpss_clk_dev;

/* Register the platform device providing the LPSS root clock ("clk-lpt"). */
static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

/*
 * Build the per-device clock tree on top of the LPSS root clock:
 * either a fixed-rate clock (when the setup hook detected one), or an
 * optional gate plus an optional fractional divider with an update gate,
 * and register a clkdev lookup for the consumer.
 */
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk;
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		/* "<dev>-div": fractional divider fed by the gate above. */
		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		/* "<dev>-update": gate on bit 31 latching the new divider. */
		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		/* The clk core copied the names; free our temporaries. */
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

/* One supplier/consumer pairing for which a device link should be made. */
struct lpss_device_links {
	const char *supplier_hid;
	const char *supplier_uid;
	const char *consumer_hid;
	const char *consumer_uid;
	u32 flags;
	const struct dmi_system_id *dep_missing_ids;
};

/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
		},
	},
	{}
};

/*
 * The _DEP method is used to identify dependencies but instead of creating
 * device links for every handle in _DEP, only links in the following list are
 * created. That is necessary because, in the general case, _DEP can refer to
 * devices that might not have drivers, or that are on different buses, or where
 * the supplier is not enumerated until after the consumer is probed.
 */
static const struct lpss_device_links lpss_device_links[] = {
	/* CHT External sdcard slot controller depends on PMIC I2C ctrl */
	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
	/* CHT iGPU depends on PMIC I2C controller */
	{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
	{"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
	 i2c1_dep_missing_dmi_ids},
	/* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
	{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
	{"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};

static bool acpi_lpss_is_supplier(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}

static bool acpi_lpss_is_consumer(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}

struct hid_uid {
	const char *hid;
	const char *uid;
};

/* bus_find_device() match callback comparing the ACPI HID/UID pair. */
static int match_hid_uid(struct device *dev, const void *data)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const struct hid_uid *id = data;

	if (!adev)
		return 0;

	return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}

/*
 * Find a device by ACPI HID/UID, first on the platform bus, then on the
 * PCI bus. Returns a referenced device (caller must put_device()) or NULL.
 */
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
	struct device *dev;

	struct hid_uid data = {
		.hid = hid,
		.uid = uid,
	};

	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
	if (dev)
		return dev;

	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}

/* Return true if @handle appears in @adev's _DEP dependency list. */
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
{
	struct acpi_handle_list dep_devices;
	acpi_status status;
	int i;

	if (!acpi_has_method(adev->handle, "_DEP"))
		return false;

	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
					 &dep_devices);
	if (ACPI_FAILURE(status)) {
		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
		return false;
	}

	for (i = 0; i < dep_devices.count; i++) {
		if (dep_devices.handles[i] == handle)
			return true;
	}

	return false;
}

/*
 * @dev1 is the supplier; link the consumer named in @link to it, but only
 * if the consumer's _DEP really lists the supplier (or the DMI quirk says
 * the _DEP entry is known to be missing).
 */
static void acpi_lpss_link_consumer(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
		device_link_add(dev2, dev1, link->flags);

	put_device(dev2);
}

/* Mirror image of acpi_lpss_link_consumer(): @dev1 is the consumer. */
static void acpi_lpss_link_supplier(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
		device_link_add(dev1, dev2, link->flags);

	put_device(dev2);
}

/* Create device links for every table entry @adev participates in. */
static void acpi_lpss_create_device_links(struct acpi_device *adev,
					  struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
		const struct lpss_device_links *link = &lpss_device_links[i];

		if (acpi_lpss_is_supplier(adev, link))
			acpi_lpss_link_consumer(&pdev->dev, link);

		if (acpi_lpss_is_consumer(adev, link))
			acpi_lpss_link_supplier(&pdev->dev, link);
	}
}

static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct
lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		/* No descriptor: plain platform device, nothing LPSS to do. */
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	/* Map the first memory resource; it holds the private registers. */
	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
		adev->pnp.type.platform_id = 0;
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	pdata->adev = adev;
	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	acpi_device_fix_up_power(adev);

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev)) {
		acpi_lpss_create_device_links(adev, pdev);
		return 1;
	}

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

err_out:
	kfree(pdata);
	return ret;
}

/* Read a 32-bit private register at @reg (relative to prv_offset). */
static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

/* Write a 32-bit private register at @reg (relative to prv_offset). */
static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

/*
 * Read a private register with dev->power.lock held; fails with -EAGAIN
 * while the device is runtime suspended (its MMIO may not be accessible).
 */
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

/* sysfs show shared by the sw_ltr and auto_ltr attributes. */
static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	/* Attribute name selects which LTR register to report. */
	reg = strcmp(attr->attr.name, "auto_ltr") ?
LPSS_SW_LTR : LPSS_AUTO_LTR; 748 ret = lpss_reg_read(dev, reg, <r_value); 749 if (ret) 750 return ret; 751 752 return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value); 753 } 754 755 static ssize_t lpss_ltr_mode_show(struct device *dev, 756 struct device_attribute *attr, char *buf) 757 { 758 u32 ltr_mode = 0; 759 char *outstr; 760 int ret; 761 762 ret = lpss_reg_read(dev, LPSS_GENERAL, <r_mode); 763 if (ret) 764 return ret; 765 766 outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto"; 767 return sprintf(buf, "%s\n", outstr); 768 } 769 770 static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL); 771 static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL); 772 static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL); 773 774 static struct attribute *lpss_attrs[] = { 775 &dev_attr_auto_ltr.attr, 776 &dev_attr_sw_ltr.attr, 777 &dev_attr_ltr_mode.attr, 778 NULL, 779 }; 780 781 static const struct attribute_group lpss_attr_group = { 782 .attrs = lpss_attrs, 783 .name = "lpss_ltr", 784 }; 785 786 static void acpi_lpss_set_ltr(struct device *dev, s32 val) 787 { 788 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 789 u32 ltr_mode, ltr_val; 790 791 ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL); 792 if (val < 0) { 793 if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) { 794 ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW; 795 __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL); 796 } 797 return; 798 } 799 ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK; 800 if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) { 801 ltr_val |= LPSS_LTR_SNOOP_LAT_32US; 802 val = LPSS_LTR_MAX_VAL; 803 } else if (val > LPSS_LTR_MAX_VAL) { 804 ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ; 805 val >>= LPSS_LTR_SNOOP_LAT_SHIFT; 806 } else { 807 ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ; 808 } 809 ltr_val |= val; 810 __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR); 811 if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) { 812 ltr_mode |= 
			    LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects 10ms delay before the device can be accessed after D3 to D0
	 * transition. However some platforms like BSW does not need this delay.
	 */
	unsigned int delay = 10;	/* default 10ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

/* ->activate() power-domain callback, run before a driver probes. */
static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only on ->probe() stage where a device is either in
	 * known state defined by BIOS or most likely powered off. Due to this
	 * we have to deassert reset line to be sure that ->probe() will
	 * recognize the device.
	 */
	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		lpss_deassert_reset(pdata);

#ifdef CONFIG_PM
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
		acpi_lpss_save_ctx(dev, pdata);
#endif

	return 0;
}

/* ->dismiss() power-domain callback: power the device down. */
static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_suspend(dev, false);
}

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

/* Serializes the IOSF D3 enter/exit sequences below. */
static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;

static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * PMC provides an information about actual status of the LPSS devices.
	 * Here we read the values related to LPSS power island, i.e. LPSS
	 * devices, excluding both LPSS DMA controllers, along with SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of entire LPSS power island per device basis.
	 * Shutdown both LPSS DMA controllers if and only if all other devices
	 * are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	lpss_iosf_d3_entered = true;

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	if (!lpss_iosf_d3_entered)
		goto exit;

	lpss_iosf_d3_entered = false;

	/* Exact reverse order of lpss_iosf_enter_d3_state(). */
	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
	struct lpss_private_data *pdata =
		acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_suspend(dev, wakeup);

	/*
	 * This call must be last in the sequence, otherwise PMC will return
	 * wrong status for devices being about to be powered off. See
	 * lpss_iosf_enter_d3_state() for further information.
	 */
	if (acpi_target_system_state() == ACPI_STATE_S0 &&
	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

/* Common resume path: quirk exit, ACPI resume, D0 delay, context restore. */
static int acpi_lpss_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be in symmetry with
	 * acpi_lpss_runtime_suspend() one.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
		acpi_lpss_restore_ctx(dev, pdata);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_do_suspend_late(struct device *dev)
{
	int ret;

	if (dev_pm_skip_suspend(dev))
		return 0;

	ret = pm_generic_suspend_late(dev);
	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	/* Deferred to the noirq phase for resume_from_noirq devices. */
	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_suspend_late(dev);
}

static int acpi_lpss_suspend_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->resume_from_noirq) {
		/*
		 * The driver's ->suspend_late callback will be invoked by
		 * acpi_lpss_do_suspend_late(), with the assumption that the
		 * driver really wanted to run that code in ->suspend_noirq, but
		 * it could not run after acpi_dev_suspend() and the driver
		 * expected the latter to be called in the "late" phase.
		 */
		ret = acpi_lpss_do_suspend_late(dev);
		if (ret)
			return ret;
	}

	return acpi_subsys_suspend_noirq(dev);
}

static int acpi_lpss_do_resume_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_resume_early(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	/* Already handled in the noirq phase for these devices. */
	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	if (dev_pm_skip_resume(dev))
		return 0;

	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_resume_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/* Follow acpi_subsys_resume_noirq(). */
	if (dev_pm_skip_resume(dev))
		return 0;

	ret = pm_generic_resume_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/*
	 * The driver's ->resume_early callback will be invoked by
	 * acpi_lpss_do_resume_early(), with the assumption that the driver
	 * really wanted to run that code in ->resume_noirq, but it could not
	 * run before acpi_dev_resume() and the driver expected the latter to be
	 * called in the "early" phase.
	 */
	return acpi_lpss_do_resume_early(dev);
}

static int acpi_lpss_do_restore_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_restore_early(dev);
}

static int acpi_lpss_restore_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_restore_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_restore_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
	return acpi_lpss_do_restore_early(dev);
}

static int acpi_lpss_do_poweroff_late(struct device *dev)
{
	int ret = pm_generic_poweroff_late(dev);

	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_poweroff_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_poweroff_late(dev);
}

static int acpi_lpss_poweroff_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_skip_suspend(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq) {
		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
		int ret = acpi_lpss_do_poweroff_late(dev);

		if (ret)
			return ret;
	}

	return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ?
ret : pm_generic_runtime_resume(dev); 1225 } 1226 #endif /* CONFIG_PM */ 1227 1228 static struct dev_pm_domain acpi_lpss_pm_domain = { 1229 #ifdef CONFIG_PM 1230 .activate = acpi_lpss_activate, 1231 .dismiss = acpi_lpss_dismiss, 1232 #endif 1233 .ops = { 1234 #ifdef CONFIG_PM 1235 #ifdef CONFIG_PM_SLEEP 1236 .prepare = acpi_subsys_prepare, 1237 .complete = acpi_subsys_complete, 1238 .suspend = acpi_subsys_suspend, 1239 .suspend_late = acpi_lpss_suspend_late, 1240 .suspend_noirq = acpi_lpss_suspend_noirq, 1241 .resume_noirq = acpi_lpss_resume_noirq, 1242 .resume_early = acpi_lpss_resume_early, 1243 .freeze = acpi_subsys_freeze, 1244 .poweroff = acpi_subsys_poweroff, 1245 .poweroff_late = acpi_lpss_poweroff_late, 1246 .poweroff_noirq = acpi_lpss_poweroff_noirq, 1247 .restore_noirq = acpi_lpss_restore_noirq, 1248 .restore_early = acpi_lpss_restore_early, 1249 #endif 1250 .runtime_suspend = acpi_lpss_runtime_suspend, 1251 .runtime_resume = acpi_lpss_runtime_resume, 1252 #endif 1253 }, 1254 }; 1255 1256 static int acpi_lpss_platform_notify(struct notifier_block *nb, 1257 unsigned long action, void *data) 1258 { 1259 struct platform_device *pdev = to_platform_device(data); 1260 struct lpss_private_data *pdata; 1261 struct acpi_device *adev; 1262 const struct acpi_device_id *id; 1263 1264 id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev); 1265 if (!id || !id->driver_data) 1266 return 0; 1267 1268 if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) 1269 return 0; 1270 1271 pdata = acpi_driver_data(adev); 1272 if (!pdata) 1273 return 0; 1274 1275 if (pdata->mmio_base && 1276 pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) { 1277 dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n"); 1278 return 0; 1279 } 1280 1281 switch (action) { 1282 case BUS_NOTIFY_BIND_DRIVER: 1283 dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain); 1284 break; 1285 case BUS_NOTIFY_DRIVER_NOT_BOUND: 1286 case BUS_NOTIFY_UNBOUND_DRIVER: 1287 
dev_pm_domain_set(&pdev->dev, NULL); 1288 break; 1289 case BUS_NOTIFY_ADD_DEVICE: 1290 dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain); 1291 if (pdata->dev_desc->flags & LPSS_LTR) 1292 return sysfs_create_group(&pdev->dev.kobj, 1293 &lpss_attr_group); 1294 break; 1295 case BUS_NOTIFY_DEL_DEVICE: 1296 if (pdata->dev_desc->flags & LPSS_LTR) 1297 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); 1298 dev_pm_domain_set(&pdev->dev, NULL); 1299 break; 1300 default: 1301 break; 1302 } 1303 1304 return 0; 1305 } 1306 1307 static struct notifier_block acpi_lpss_nb = { 1308 .notifier_call = acpi_lpss_platform_notify, 1309 }; 1310 1311 static void acpi_lpss_bind(struct device *dev) 1312 { 1313 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 1314 1315 if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR)) 1316 return; 1317 1318 if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) 1319 dev->power.set_latency_tolerance = acpi_lpss_set_ltr; 1320 else 1321 dev_err(dev, "MMIO size insufficient to access LTR\n"); 1322 } 1323 1324 static void acpi_lpss_unbind(struct device *dev) 1325 { 1326 dev->power.set_latency_tolerance = NULL; 1327 } 1328 1329 static struct acpi_scan_handler lpss_handler = { 1330 .ids = acpi_lpss_device_ids, 1331 .attach = acpi_lpss_create_device, 1332 .bind = acpi_lpss_bind, 1333 .unbind = acpi_lpss_unbind, 1334 }; 1335 1336 void __init acpi_lpss_init(void) 1337 { 1338 const struct x86_cpu_id *id; 1339 int ret; 1340 1341 ret = lpt_clk_init(); 1342 if (ret) 1343 return; 1344 1345 id = x86_match_cpu(lpss_cpu_ids); 1346 if (id) 1347 lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON; 1348 1349 bus_register_notifier(&platform_bus_type, &acpi_lpss_nb); 1350 acpi_scan_add_handler(&lpss_handler); 1351 } 1352 1353 #else 1354 1355 static struct acpi_scan_handler lpss_handler = { 1356 .ids = acpi_lpss_device_ids, 1357 }; 1358 1359 void __init acpi_lpss_init(void) 1360 { 1361 acpi_scan_add_handler(&lpss_handler); 
1362 } 1363 1364 #endif /* CONFIG_X86_INTEL_LPSS */ 1365