/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/delay.h>

#include "internal.h"

ACPI_MODULE_NAME("acpi_lpss");

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

/* LPSS Flags */
#define LPSS_CLK		BIT(0)
#define LPSS_CLK_GATE		BIT(1)
#define LPSS_CLK_DIVIDER	BIT(2)
#define LPSS_LTR		BIT(3)
#define LPSS_SAVE_CTX		BIT(4)
#define LPSS_NO_D3_DELAY	BIT(5)

struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for the LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * In case of no power, any access to the DMA controller will hang the system.
 * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
 * well as on the Asus T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever we have at least one other device in use.
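 *
 * The actual power state of the island is changed over IOSF-SB in
 * lpss_iosf_enter_d3_state() and lpss_iosf_exit_d3_state() below.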
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR			0xF4
#define LPSS_UART_CPR_AFCE		BIT(4)

static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
	if (!acpi_dev_present("INT33FD", NULL, -1))
		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE			0x6c

static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
	.setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.setup = bsw_pwm_setup,
};
static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};

#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id lpss_cpu_ids[] = {
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT33C7", },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
	{ "INT33B2", },
	{ "INT33FC", },

	/* Braswell LPSS devices */
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT3437", },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;
	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device.
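 *
 * Every device flagged with LPSS_CLK is parented to the single root clock
 * exposed by the "clk-lpt" platform device registered below;
 * register_device_clock() then stacks the optional per-device gate and
 * divider clocks on top of it.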
 */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk = ERR_PTR(-ENODEV);
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}
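
	/*
	 * Devices that do have an LPSS descriptor additionally get private
	 * data, an MMIO mapping of their memory resource and, if requested
	 * by the descriptor, a device clock before the platform device is
	 * created.
	 */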
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		ret = -ENOMEM;
		goto err_out;
	}

	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	ret = acpi_device_fix_up_power(adev);
	if (ret) {
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev)) {
		return 1;
	}

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

err_out:
	kfree(pdata);
	return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}
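
/*
 * The SW and automatic LTR values and the current LTR mode are exposed
 * read-only via the "lpss_ltr" sysfs group for devices flagged with LPSS_LTR.
 */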
"sw" : "auto"; 565 return sprintf(buf, "%s\n", outstr); 566 } 567 568 static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL); 569 static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL); 570 static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL); 571 572 static struct attribute *lpss_attrs[] = { 573 &dev_attr_auto_ltr.attr, 574 &dev_attr_sw_ltr.attr, 575 &dev_attr_ltr_mode.attr, 576 NULL, 577 }; 578 579 static const struct attribute_group lpss_attr_group = { 580 .attrs = lpss_attrs, 581 .name = "lpss_ltr", 582 }; 583 584 static void acpi_lpss_set_ltr(struct device *dev, s32 val) 585 { 586 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 587 u32 ltr_mode, ltr_val; 588 589 ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL); 590 if (val < 0) { 591 if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) { 592 ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW; 593 __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL); 594 } 595 return; 596 } 597 ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK; 598 if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) { 599 ltr_val |= LPSS_LTR_SNOOP_LAT_32US; 600 val = LPSS_LTR_MAX_VAL; 601 } else if (val > LPSS_LTR_MAX_VAL) { 602 ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ; 603 val >>= LPSS_LTR_SNOOP_LAT_SHIFT; 604 } else { 605 ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ; 606 } 607 ltr_val |= val; 608 __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR); 609 if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) { 610 ltr_mode |= LPSS_GENERAL_LTR_MODE_SW; 611 __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL); 612 } 613 } 614 615 #ifdef CONFIG_PM 616 /** 617 * acpi_lpss_save_ctx() - Save the private registers of LPSS device 618 * @dev: LPSS device 619 * @pdata: pointer to the private data of the LPSS device 620 * 621 * Most LPSS devices have private registers which may loose their context when 622 * the device is powered down. acpi_lpss_save_ctx() saves those registers into 623 * prv_reg_ctx array. 624 */ 625 static void acpi_lpss_save_ctx(struct device *dev, 626 struct lpss_private_data *pdata) 627 { 628 unsigned int i; 629 630 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { 631 unsigned long offset = i * sizeof(u32); 632 633 pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset); 634 dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n", 635 pdata->prv_reg_ctx[i], offset); 636 } 637 } 638 639 /** 640 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device 641 * @dev: LPSS device 642 * @pdata: pointer to the private data of the LPSS device 643 * 644 * Restores the registers that were previously stored with acpi_lpss_save_ctx(). 645 */ 646 static void acpi_lpss_restore_ctx(struct device *dev, 647 struct lpss_private_data *pdata) 648 { 649 unsigned int i; 650 651 for (i = 0; i < LPSS_PRV_REG_COUNT; i++) { 652 unsigned long offset = i * sizeof(u32); 653 654 __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset); 655 dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n", 656 pdata->prv_reg_ctx[i], offset); 657 } 658 } 659 660 static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata) 661 { 662 /* 663 * The following delay is needed or the subsequent write operations may 664 * fail. The LPSS devices are actually PCI devices and the PCI spec 665 * expects 10ms delay before the device can be accessed after D3 to D0 666 * transition. However some platforms like BSW does not need this delay. 
	 */
	unsigned int delay = 10;	/* default 10ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where the device is
	 * either in a known state defined by the BIOS or, most likely, powered
	 * off. Because of that we have to deassert the reset line to be sure
	 * that ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		lpss_deassert_reset(pdata);

	return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_runtime_suspend(dev);
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_suspend_late(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	return acpi_dev_suspend_late(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume_early(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);

static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. LPSS devices, excluding both LPSS DMA controllers, along with
	 * the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all other
	 * devices are already in D3hot.
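	 * A non-zero pmc_status below means that at least one other function
	 * on the island is still enabled or not yet in D3hot, so the DMA
	 * controllers are left powered on.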
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);
exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_runtime_suspend(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_runtime_suspend(dev);

	/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return a wrong status for devices that are about to be powered off.
	 * See lpss_iosf_enter_d3_state() for further information.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to stay symmetric with the
	 * acpi_lpss_runtime_suspend() sequence.
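	 * The LPSS power island has to be brought back up over IOSF-SB before
	 * the device itself is resumed and its MMIO space is touched again.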
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = pm_complete_with_resume_check,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_suspend,
		.poweroff_late = acpi_lpss_suspend_late,
		.restore_early = acpi_lpss_resume_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};

static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};

static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;
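
	/*
	 * The always-power-on quirk for the LPSS DMA controllers is only
	 * needed on the Bay Trail (Silvermont) and Braswell/Cherry Trail
	 * (Airmont) platforms listed in lpss_cpu_ids.
	 */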
	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */