/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
#include "vlv_sideband.h"

/*
 * Iterate over the power wells that provide @__domain, in the order the
 * wells are listed in the platform's power well descriptors.
 */
#define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
	for_each_power_well(__dev_priv, __power_well)			\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

/* Same as above, but walking the power well list in reverse order. */
#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
	for_each_power_well_reverse(__dev_priv, __power_well)		\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

/*
 * intel_display_power_domain_str - map a power domain enum to its name
 * @domain: power domain to name
 *
 * Returns a static, human readable name for @domain, used in debug/WARN
 * output. Unknown values trigger MISSING_CASE() and return "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_IO_B:
		return "AUX_IO_B";
	case POWER_DOMAIN_AUX_IO_C:
		return "AUX_IO_C";
	case POWER_DOMAIN_AUX_IO_D:
		return "AUX_IO_D";
	case POWER_DOMAIN_AUX_IO_E:
		return "AUX_IO_E";
	case POWER_DOMAIN_AUX_IO_F:
		return "AUX_IO_F";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		/* New domains must be added here; log and keep going. */
		MISSING_CASE(domain);
		return "?";
	}
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	/* With runtime PM suspended nothing is powered; report disabled. */
	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	/*
	 * Walk the wells providing this domain in reverse order; the domain
	 * is enabled only if every non-always-on well in the chain is
	 * enabled (per the cached state — no HW access here).
	 */
	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
		if (intel_power_well_is_always_on(power_well))
			continue;

		if (!intel_power_well_is_enabled_cached(power_well)) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->display.power.domains;

	/* Serialize against concurrent well enable/disable. */
	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Clamp @target_dc_state to the deepest DC state actually allowed by
 * power_domains->allowed_dc_mask, walking from the requested state towards
 * shallower ones (DC6 -> DC5 -> DC3CO -> disabled).
 */
static u32
sanitize_target_dc_state(struct drm_i915_private *i915,
			 u32 target_dc_state)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	/* Fallback order: each entry falls back to the next one. */
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		/* Requested state is allowed; keep it. */
		if (power_domains->allowed_dc_mask & target_dc_state)
			break;

		/* Not allowed; try the next shallower state. */
		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	/* Platforms without a DC-off well have nothing to update. */
	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == power_domains->target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	power_domains->target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

/* Mask covering every valid power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* Union of both pending async-put domain masks into @mask. */
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/*
 * A domain must never be pending in both async_put_domains masks at once;
 * WARN and return false if the invariant is violated.
 */
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	return !drm_WARN_ON(&i915->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}

/*
 * Check all async-put invariants: the masks are disjoint, a wakeref is held
 * iff some domain is pending, and every pending domain has use count 1.
 */
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	/* Wakeref held must match "some domain pending". */
	err |= drm_WARN_ON(&i915->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

/* Dump each domain in @mask with its current use count, for debugging. */
static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

/* Dump the complete async-put state (wakeref + both pending masks). */
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}

/* Verify async-put invariants, dumping the state if they are broken. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

/* No-op stub when runtime PM debugging is compiled out. */
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

/* No-op stub when runtime PM debugging is compiled out. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

/* As __async_put_domains_mask(), but asserting the masks are disjoint first. */
static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}

/* Remove @domain from both pending async-put masks. */
static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}

/*
 * If @domain has a pending async put, steal that reference instead of taking
 * a new one: clear it from the pending masks and, if nothing else is
 * pending, cancel the worker and drop its raw wakeref.
 *
 * Returns true if a pending reference was grabbed (caller needs no new get).
 * Must be called with power_domains->lock held.
 */
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	/* More domains still pending? Leave the worker and wakeref alone. */
	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

/*
 * Take a reference on @domain: either recover a pending async-put reference,
 * or enable every power well providing the domain. Bumps the domain use
 * count. Must be called with power_domains->lock held.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, domain)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	/* A domain reference always pins the device via runtime PM. */
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		/* NOTE(review): 'false' serves as an empty (0) wakeref here. */
		return false;

	mutex_lock(&power_domains->lock);

	/* Only take the reference if the domain is already powered. */
	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}

/*
 * Drop one reference on @domain: decrement the use count and release every
 * power well providing it, in reverse order. WARNs on underflow and on a
 * racing async put. Must be called with power_domains->lock held.
 */
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->display.power.domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}

/* Locked wrapper for __intel_display_power_put_domain(). */
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

/*
 * Hand @wakeref (a raw wakeref) over to the delayed async-put worker and
 * schedule it. WARNs if a wakeref is already stashed or if the work was
 * already queued. Must be called with power_domains->lock held.
 */
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

/*
 * Release every domain in @mask that was deferred via async put.
 * Must be called with power_domains->lock and a raw wakeref held.
 */
static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     display.power.domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must hold already raw wakeref, upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

/*
 * Delayed worker releasing domains deferred by intel_display_power_put_async().
 * Releases everything in async_put_domains[0]; domains put while the worker
 * was pending (mask [1]) are promoted to [0] and the work is requeued.
 */
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.power.domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	/* Raw wakeref to hand over in case the work needs to requeue itself. */
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* Drop whichever wakerefs were not handed over above. */
	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	/* Raw wakeref to hand over to the worker if one gets queued. */
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	/* Not the last reference: drop it synchronously, nothing to defer. */
	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	/* The caller's wakeref is always released here. */
	intel_runtime_pm_put(rpm, wakeref);
}

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	/* No stashed wakeref means no pending async puts to complete. */
	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	/* Release all pending domains here instead of in the worker. */
	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	intel_display_power_flush_work(i915);
	/* Wait for a possibly still-running work handler to finish. */
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress wakeref
 * tracking when the corresponding debug kconfig option is disabled, should not
 * be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

/*
 * Grab a reference on @domain and record it in @power_domain_set, so the
 * whole set can later be released with intel_display_power_put_mask_in_set().
 * WARNs if the domain is already in the set.
 */
void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Track the wakeref per domain only in debug builds. */
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

/*
 * Like intel_display_power_get_in_set(), but only if @domain is already
 * enabled. Returns true if the reference was taken and recorded.
 */
bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

/*
 * Release the references recorded in @power_domain_set for every domain in
 * @mask. WARNs if @mask contains domains not in the set.
 */
void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, mask) {
		/* -1 is the "untracked" placeholder in non-debug builds. */
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}

/*
 * Normalize the disable_power_well module parameter: any non-negative value
 * collapses to 0/1; a negative value means "auto", which defaults to 1.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

/*
 * Compute the mask of DC states the driver may use, based on the platform's
 * deepest supported state and the enable_dc module parameter.
 */
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/* Deepest DC state supported per platform (see the switch below). */
	if (IS_DG2(dev_priv))
		max_dc = 1;
	else if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	       DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	/* DC states require the power wells to be disabled when idle. */
	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		/* -1 means "auto": use the platform maximum. */
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	/* Sanitize the module parameters before anything consumes them. */
	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	power_domains->allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	/* Default to the deepest state; clamped to what is allowed. */
	power_domains->target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}

/* Sync the SW state of every power well with its current HW state. */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

/*
 * Request power on/off for a single DBuf slice and wait (fixed 10us) for
 * the HW to acknowledge; WARNs on timeout.
 */
static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	/* Posting read to flush the request before the fixed delay. */
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

/*
 * Power each DBuf slice on or off so that exactly @req_slices end up
 * enabled, and record the new state in dev_priv.
 */
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->display.dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

/* Read out the enabled-slices state from HW, then enable at least slice 1. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->display.dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->display.dbuf.enabled_slices);
}

/* Power down all DBuf slices. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

/*
 * Program the DBuf tracker state service value (8) on every slice.
 * Not applicable on ADL-P.
 */
static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

/*
 * Program the MBus ABOX credit values on every ABOX instance the platform
 * has. Not applicable on ADL-P or display version 14+.
 */
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	/*
	 * NOTE(review): the bitmap length here is sizeof() in bytes (8), not
	 * bits; harmless while abox_mask only uses the low bits — confirm.
	 */
	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

/* Check that BIOS left LCPLL in the expected state; complain, don't fix. */
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

/*
 * Everything that depends on LCPLL (CRTCs, power well, PLLs, panel power,
 * PWMs, utility pin, GTC, IRQs) must already be off before LCPLL itself may
 * be disabled; warn on anything still active.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* D_COMP lives at a different register on HSW vs BDW. */
static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

/* On HSW D_COMP is written via the pcode mailbox; BDW uses plain MMIO. */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move CDCLK off LCPLL before the PLL is disabled. */
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	/* Disable the compensation logic and wait for RCOMP to go idle. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Nothing to do if LCPLL is already locked and fully restored. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	/* Re-enable (force) the compensation logic. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Move CDCLK back from FCLK onto LCPLL. */
		intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv))
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     PCH_LP_PARTITION_LEVEL_DISABLE, 0);

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Undo hsw_enable_pc8(): restore LCPLL, refclk and clock gating. */
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv))
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     0, PCH_LP_PARTITION_LEVEL_DISABLE);
}

/*
 * Enable/disable the PCH reset handshake. IVB uses a different register
 * and bits; display version 14+ additionally carries the PICA handshake
 * enable in the same register.
 */
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	if (DISPLAY_VER(dev_priv) >= 14)
		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

	intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}

/* SKL display HW init: PCH handshake, PG1 + Misc I/O, CDCLK, DBuf, DMC. */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

/* SKL display HW uninit, reverse order of skl_display_core_init(). */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

/* BXT display HW init: no PCH, so the reset handshake must stay disabled. */
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

/* BXT display HW uninit, reverse order of bxt_display_core_init(). */
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

/* BW_BUDDY page mask for one DRAM type / channel-count configuration. */
struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

/* Default page masks, keyed by DRAM configuration; zero-terminated. */
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

/* Wa_1409767108 page masks (ADL-S and early RKL steppings). */
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

/*
 * Program the BW_BUDDY page masks matching the detected DRAM configuration,
 * or disable the buddy logic if the configuration is unknown.
 */
static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		/* Wa_1409767108 */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	/* Find the table entry matching the current DRAM configuration. */
	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		/* NOTE(review): bitmap length passed in bytes, not bits — see icl_mbus_init */
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}

/* ICL+ display HW init, following the numbered BSpec sequence. */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
			     DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
			     DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

/* ICL+ display HW uninit, reverse order of icl_display_core_init(). */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	intel_dmc_disable_program(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

/* Reconstruct the CHV DISPLAY_PHY_CONTROL shadow value from the HW state. */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->display.power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		/* all four lane-ready bits set: leave the override disabled */
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->display.power.chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}

/* Toggle the display PHY side reset by power-gating the common lane well. */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (intel_power_well_is_enabled(dev_priv, cmn) &&
	    intel_power_well_is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	intel_power_well_disable(dev_priv, cmn);
}

/* Check via Punit whether the island behind @reg0 is power gated. */
static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

/* Warn if the VED (video encode/decode) island is not power gated. */
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

/* Warn if the ISP island is not power gated (unless an ISP PCI dev exists). */
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	power_domains->initializing = true;

	/* Per-platform display core init / BIOS-state sanity checks. */
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 *
initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
										      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->display.power.domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->display.power.domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_sanitize_state - sanitize power domains state
 * @i915: i915 device instance
 *
 * Sanitize the power domains state during driver loading and system resume.
 * The function will disable all display power wells that BIOS has enabled
 * without a user for it (any user for a power well has taken a reference
 * on it by the time this function is called, after the state of all the
 * pipe, encoder, etc. HW resources have been sanitized).
 */
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);

	/* Walk in reverse so dependent wells go down before their parents. */
	for_each_power_well_reverse(i915, power_well) {
		if (power_well->desc->always_on || power_well->count ||
		    !intel_power_well_is_enabled(i915, power_well))
			continue;

		drm_dbg_kms(&i915->drm,
			    "BIOS left unused %s power well enabled, disabling it\n",
			    intel_power_well_name(power_well));
		intel_power_well_disable(i915, power_well);
	}

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these function is to keep the rest of power wells enabled until the end
 * of display HW readout (which will acquire the power references reflecting
 * the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->display.power.domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->display.power.domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	/* Per-platform display core uninit, mirroring init_hw. */
	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
2074 */ 2075 void intel_power_domains_resume(struct drm_i915_private *i915) 2076 { 2077 struct i915_power_domains *power_domains = &i915->display.power.domains; 2078 2079 if (power_domains->display_core_suspended) { 2080 intel_power_domains_init_hw(i915, true); 2081 power_domains->display_core_suspended = false; 2082 } else { 2083 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 2084 power_domains->init_wakeref = 2085 intel_display_power_get(i915, POWER_DOMAIN_INIT); 2086 } 2087 2088 intel_power_domains_verify_state(i915); 2089 } 2090 2091 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2092 2093 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 2094 { 2095 struct i915_power_domains *power_domains = &i915->display.power.domains; 2096 struct i915_power_well *power_well; 2097 2098 for_each_power_well(i915, power_well) { 2099 enum intel_display_power_domain domain; 2100 2101 drm_dbg(&i915->drm, "%-25s %d\n", 2102 intel_power_well_name(power_well), intel_power_well_refcount(power_well)); 2103 2104 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2105 drm_dbg(&i915->drm, " %-23s %d\n", 2106 intel_display_power_domain_str(domain), 2107 power_domains->domain_use_count[domain]); 2108 } 2109 } 2110 2111 /** 2112 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 2113 * @i915: i915 device instance 2114 * 2115 * Verify if the reference count of each power well matches its HW enabled 2116 * state and the total refcount of the domains it belongs to. This must be 2117 * called after modeset HW state sanitization, which is responsible for 2118 * acquiring reference counts for any power wells in use and disabling the 2119 * ones left on by BIOS but not required by any active output. 
2120 */ 2121 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2122 { 2123 struct i915_power_domains *power_domains = &i915->display.power.domains; 2124 struct i915_power_well *power_well; 2125 bool dump_domain_info; 2126 2127 mutex_lock(&power_domains->lock); 2128 2129 verify_async_put_domains_state(power_domains); 2130 2131 dump_domain_info = false; 2132 for_each_power_well(i915, power_well) { 2133 enum intel_display_power_domain domain; 2134 int domains_count; 2135 bool enabled; 2136 2137 enabled = intel_power_well_is_enabled(i915, power_well); 2138 if ((intel_power_well_refcount(power_well) || 2139 intel_power_well_is_always_on(power_well)) != 2140 enabled) 2141 drm_err(&i915->drm, 2142 "power well %s state mismatch (refcount %d/enabled %d)", 2143 intel_power_well_name(power_well), 2144 intel_power_well_refcount(power_well), enabled); 2145 2146 domains_count = 0; 2147 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2148 domains_count += power_domains->domain_use_count[domain]; 2149 2150 if (intel_power_well_refcount(power_well) != domains_count) { 2151 drm_err(&i915->drm, 2152 "power well %s refcount/domain refcount mismatch " 2153 "(refcount %d/domains refcount %d)\n", 2154 intel_power_well_name(power_well), 2155 intel_power_well_refcount(power_well), 2156 domains_count); 2157 dump_domain_info = true; 2158 } 2159 } 2160 2161 if (dump_domain_info) { 2162 static bool dumped; 2163 2164 if (!dumped) { 2165 intel_power_domains_dump_info(i915); 2166 dumped = true; 2167 } 2168 } 2169 2170 mutex_unlock(&power_domains->lock); 2171 } 2172 2173 #else 2174 2175 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2176 { 2177 } 2178 2179 #endif 2180 2181 void intel_display_power_suspend_late(struct drm_i915_private *i915) 2182 { 2183 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 2184 IS_BROXTON(i915)) { 2185 bxt_enable_dc9(i915); 2186 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2187 
		hsw_enable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

/*
 * Counterpart of intel_display_power_suspend_late(): disable DC9/PC8 again
 * and clear the Wa_14010685332 chicken bit that was set on suspend.
 */
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		/* Sanitize the DC state before disabling DC9. */
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}

/* Uninit the display core and enable DC9 (PC8 on HSW/BDW). */
void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

/*
 * Counterpart of intel_display_power_suspend(): disable DC9/PC8, re-init the
 * display core, and re-enable the deepest allowed DC state if DMC is loaded.
 */
void intel_display_power_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915)) {
			/* Prefer DC6 over DC5 when both are allowed. */
			if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915) &&
		    (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) ||
		   IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

/* Print every power well's refcount and per-domain use counts to @m (debugfs). */
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
			   intel_power_well_refcount(power_well));

		for_each_power_domain(power_domain, intel_power_well_domains(power_well))
			seq_printf(m, " %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);
}

/*
 * Maps a contiguous range of DDI ports / AUX channels to the power domains of
 * the first port in the range. The lookup helpers below compute the domain
 * for a given port as base domain + (port - port_start) (and likewise for AUX
 * channels). A POWER_DOMAIN_INVALID entry marks a domain type the range
 * doesn't have.
 */
struct intel_ddi_port_domains {
	enum port port_start;
	enum port port_end;
	enum aux_ch aux_ch_start;
	enum aux_ch aux_ch_end;

	enum intel_display_power_domain ddi_lanes;
	enum intel_display_power_domain ddi_io;
	enum intel_display_power_domain aux_io;
	enum intel_display_power_domain aux_legacy_usbc;
	enum intel_display_power_domain aux_tbt;
};

/* Platforms before display version 11: a single linear range, ports A-F. */
static const struct intel_ddi_port_domains
i9xx_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};

/* Display version 11: ports A-B, plus a C-F range that also has TBT AUX. */
static const struct intel_ddi_port_domains
d11_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_B,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end
			    = AUX_CH_B,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_C,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_C,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
		.aux_io = POWER_DOMAIN_AUX_IO_C,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

/* Display version 12: ports A-C, plus type-C ports TC1-TC6 with TBT AUX. */
static const struct intel_ddi_port_domains
d12_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC6,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC6,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

/* Display version 13: ports A-C, TC1-TC4, and the XELPD D/E range. */
static const struct intel_ddi_port_domains
d13_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC4,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC4,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io =
POWER_DOMAIN_PORT_DDI_IO_TC1, 2376 .aux_io = POWER_DOMAIN_INVALID, 2377 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, 2378 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2379 }, { 2380 .port_start = PORT_D_XELPD, 2381 .port_end = PORT_E_XELPD, 2382 .aux_ch_start = AUX_CH_D_XELPD, 2383 .aux_ch_end = AUX_CH_E_XELPD, 2384 2385 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, 2386 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, 2387 .aux_io = POWER_DOMAIN_AUX_IO_D, 2388 .aux_legacy_usbc = POWER_DOMAIN_AUX_D, 2389 .aux_tbt = POWER_DOMAIN_INVALID, 2390 }, 2391 }; 2392 2393 static void 2394 intel_port_domains_for_platform(struct drm_i915_private *i915, 2395 const struct intel_ddi_port_domains **domains, 2396 int *domains_size) 2397 { 2398 if (DISPLAY_VER(i915) >= 13) { 2399 *domains = d13_port_domains; 2400 *domains_size = ARRAY_SIZE(d13_port_domains); 2401 } else if (DISPLAY_VER(i915) >= 12) { 2402 *domains = d12_port_domains; 2403 *domains_size = ARRAY_SIZE(d12_port_domains); 2404 } else if (DISPLAY_VER(i915) >= 11) { 2405 *domains = d11_port_domains; 2406 *domains_size = ARRAY_SIZE(d11_port_domains); 2407 } else { 2408 *domains = i9xx_port_domains; 2409 *domains_size = ARRAY_SIZE(i9xx_port_domains); 2410 } 2411 } 2412 2413 static const struct intel_ddi_port_domains * 2414 intel_port_domains_for_port(struct drm_i915_private *i915, enum port port) 2415 { 2416 const struct intel_ddi_port_domains *domains; 2417 int domains_size; 2418 int i; 2419 2420 intel_port_domains_for_platform(i915, &domains, &domains_size); 2421 for (i = 0; i < domains_size; i++) 2422 if (port >= domains[i].port_start && port <= domains[i].port_end) 2423 return &domains[i]; 2424 2425 return NULL; 2426 } 2427 2428 enum intel_display_power_domain 2429 intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) 2430 { 2431 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2432 2433 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID)) 2434 return 
POWER_DOMAIN_PORT_DDI_IO_A; 2435 2436 return domains->ddi_io + (int)(port - domains->port_start); 2437 } 2438 2439 enum intel_display_power_domain 2440 intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port) 2441 { 2442 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2443 2444 if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID)) 2445 return POWER_DOMAIN_PORT_DDI_LANES_A; 2446 2447 return domains->ddi_lanes + (int)(port - domains->port_start); 2448 } 2449 2450 static const struct intel_ddi_port_domains * 2451 intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch) 2452 { 2453 const struct intel_ddi_port_domains *domains; 2454 int domains_size; 2455 int i; 2456 2457 intel_port_domains_for_platform(i915, &domains, &domains_size); 2458 for (i = 0; i < domains_size; i++) 2459 if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) 2460 return &domains[i]; 2461 2462 return NULL; 2463 } 2464 2465 enum intel_display_power_domain 2466 intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2467 { 2468 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2469 2470 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID)) 2471 return POWER_DOMAIN_AUX_IO_A; 2472 2473 return domains->aux_io + (int)(aux_ch - domains->aux_ch_start); 2474 } 2475 2476 enum intel_display_power_domain 2477 intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2478 { 2479 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2480 2481 if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)) 2482 return POWER_DOMAIN_AUX_A; 2483 2484 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); 2485 } 2486 2487 enum intel_display_power_domain 2488 
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

	/* Ranges without TBT support have aux_tbt == POWER_DOMAIN_INVALID. */
	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_TBT1;

	/* Base domain + channel offset (relies on consecutive enum values). */
	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
}