/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_snps_phy.h"
#include "vlv_sideband.h"

#define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
	for_each_power_well(__dev_priv, __power_well)			\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
	for_each_power_well_reverse(__dev_priv, __power_well)		\
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
		if (intel_power_well_is_always_on(power_well))
			continue;

		if (!intel_power_well_is_enabled_cached(power_well)) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->dmc.target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	dev_priv->dmc.target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
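
/*
 * Illustrative sketch (kept out of the build): a feature that wants DC3CO
 * instead of the default deep DC state would switch the target around its
 * active period roughly like this. The caller name and state choices below
 * are hypothetical; sanitize_target_dc_state() falls back to the next deeper
 * allowed state if the requested one isn't in allowed_dc_mask.
 */
#if 0
static void example_switch_to_dc3co(struct drm_i915_private *dev_priv)
{
	/* Request DC3CO while the feature is active... */
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);

	/* ...and restore the default deep state when done. */
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
#endif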

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	return !drm_WARN_ON(&i915->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	err |= drm_WARN_ON(&i915->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}

static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, domain)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
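
/*
 * Illustrative sketch (kept out of the build): the usual pattern is to hold
 * the domain reference only around the hardware access and to release it with
 * the wakeref returned by the get. The function name and the choice of
 * POWER_DOMAIN_AUX_A below are made up for illustration.
 */
#if 0
static void example_aux_a_access(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);

	/* ... touch registers backed by the AUX A power domain ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
}
#endif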

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
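
/*
 * Illustrative sketch (kept out of the build): hardware state readout code
 * typically uses the conditional get so it only touches the block when it is
 * already powered, and bails out otherwise. Names below are hypothetical.
 */
#if 0
static bool example_readout_pipe_a(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;
	bool active = false;

	wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A);
	if (!wakeref)
		return false;

	/* ... read out PIPE_A registers and set @active accordingly ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);

	return active;
}
#endif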

static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}

static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must hold already raw wakeref, upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_delayed_work(&power_domains->async_put_work);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress wakeref
 * tracking when the corresponding debug kconfig option is disabled, should not
 * be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}
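
/*
 * Illustrative sketch (kept out of the build): the *_in_set helpers track the
 * acquired domains (and, with runtime PM debugging, their wakerefs) in a
 * caller-provided set so they can all be released with one mask-based put.
 * The function name and domain choices below are made up for illustration.
 */
#if 0
static void example_domain_set_usage(struct drm_i915_private *i915)
{
	struct intel_display_power_domain_set power_domain_set = {};

	intel_display_power_get_in_set(i915, &power_domain_set, POWER_DOMAIN_PIPE_A);
	intel_display_power_get_in_set(i915, &power_domain_set, POWER_DOMAIN_TRANSCODER_A);

	/* ... access the hardware covered by the domains in the set ... */

	intel_display_power_put_mask_in_set(i915, &power_domain_set,
					    &power_domain_set.mask);
}
#endif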

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	if (IS_DG2(dev_priv))
		max_dc = 0;
	else if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
		DC_STATE_EN_DC9 : 0;

	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
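
/*
 * Worked example (hypothetical configuration): on a non-DG1/DG2 platform with
 * DISPLAY_VER() >= 12 and the default disable_power_well=1 and enable_dc=-1
 * module parameters, max_dc is 4, so the allowed mask above ends up as
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6.
 */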

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->dmc.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->dmc.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	intel_display_power_map_cleanup(&dev_priv->power_domains);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->dbuf.enabled_slices);
}

static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not in the PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}

static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};

static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
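
/*
 * Worked example (hypothetical system): with 2 DRAM channels of DDR4 and no
 * Wa_1409767108 stepping in effect, the lookup above matches the
 * tgl_buddy_page_masks entry with page_mask 0x1F, which is then written to
 * each BW_BUDDY_PAGE_MASK instance selected by abox_mask.
 */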
val; 1607 1608 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1609 1610 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */ 1611 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && 1612 INTEL_PCH_TYPE(dev_priv) < PCH_DG1) 1613 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0, 1614 PCH_DPMGUNIT_CLOCK_GATE_DISABLE); 1615 1616 /* 1. Enable PCH reset handshake. */ 1617 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 1618 1619 if (!HAS_DISPLAY(dev_priv)) 1620 return; 1621 1622 /* 2. Initialize all combo phys */ 1623 intel_combo_phy_init(dev_priv); 1624 1625 /* 1626 * 3. Enable Power Well 1 (PG1). 1627 * The AUX IO power wells will be enabled on demand. 1628 */ 1629 mutex_lock(&power_domains->lock); 1630 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1631 intel_power_well_enable(dev_priv, well); 1632 mutex_unlock(&power_domains->lock); 1633 1634 /* 4. Enable CDCLK. */ 1635 intel_cdclk_init_hw(dev_priv); 1636 1637 if (DISPLAY_VER(dev_priv) >= 12) 1638 gen12_dbuf_slices_config(dev_priv); 1639 1640 /* 5. Enable DBUF. */ 1641 gen9_dbuf_enable(dev_priv); 1642 1643 /* 6. Setup MBUS. */ 1644 icl_mbus_init(dev_priv); 1645 1646 /* 7. Program arbiter BW_BUDDY registers */ 1647 if (DISPLAY_VER(dev_priv) >= 12) 1648 tgl_bw_buddy_init(dev_priv); 1649 1650 /* 8. Ensure PHYs have completed calibration and adaptation */ 1651 if (IS_DG2(dev_priv)) 1652 intel_snps_phy_wait_for_calibration(dev_priv); 1653 1654 if (resume) 1655 intel_dmc_load_program(dev_priv); 1656 1657 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ 1658 if (DISPLAY_VER(dev_priv) >= 12) { 1659 val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 1660 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; 1661 intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val); 1662 } 1663 1664 /* Wa_14011503030:xelpd */ 1665 if (DISPLAY_VER(dev_priv) >= 13) 1666 intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0); 1667 } 1668 1669 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 1670 { 1671 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1672 struct i915_power_well *well; 1673 1674 if (!HAS_DISPLAY(dev_priv)) 1675 return; 1676 1677 gen9_disable_dc_states(dev_priv); 1678 1679 /* 1. Disable all display engine functions -> aready done */ 1680 1681 /* 2. Disable DBUF */ 1682 gen9_dbuf_disable(dev_priv); 1683 1684 /* 3. Disable CD clock */ 1685 intel_cdclk_uninit_hw(dev_priv); 1686 1687 /* 1688 * 4. Disable Power Well 1 (PG1). 1689 * The AUX IO power wells are toggled on demand, so they are already 1690 * disabled at this point. 1691 */ 1692 mutex_lock(&power_domains->lock); 1693 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 1694 intel_power_well_disable(dev_priv, well); 1695 mutex_unlock(&power_domains->lock); 1696 1697 /* 5. */ 1698 intel_combo_phy_uninit(dev_priv); 1699 } 1700 1701 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 1702 { 1703 struct i915_power_well *cmn_bc = 1704 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1705 struct i915_power_well *cmn_d = 1706 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 1707 1708 /* 1709 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 1710 * workaround never ever read DISPLAY_PHY_CONTROL, and 1711 * instead maintain a shadow copy ourselves. Use the actual 1712 * power well state and lane status to reconstruct the 1713 * expected initial value. 
1714 */ 1715 dev_priv->chv_phy_control = 1716 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 1717 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 1718 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 1719 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 1720 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 1721 1722 /* 1723 * If all lanes are disabled we leave the override disabled 1724 * with all power down bits cleared to match the state we 1725 * would use after disabling the port. Otherwise enable the 1726 * override and set the lane powerdown bits accding to the 1727 * current lane status. 1728 */ 1729 if (intel_power_well_is_enabled(dev_priv, cmn_bc)) { 1730 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); 1731 unsigned int mask; 1732 1733 mask = status & DPLL_PORTB_READY_MASK; 1734 if (mask == 0xf) 1735 mask = 0x0; 1736 else 1737 dev_priv->chv_phy_control |= 1738 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 1739 1740 dev_priv->chv_phy_control |= 1741 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 1742 1743 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 1744 if (mask == 0xf) 1745 mask = 0x0; 1746 else 1747 dev_priv->chv_phy_control |= 1748 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 1749 1750 dev_priv->chv_phy_control |= 1751 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 1752 1753 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 1754 1755 dev_priv->chv_phy_assert[DPIO_PHY0] = false; 1756 } else { 1757 dev_priv->chv_phy_assert[DPIO_PHY0] = true; 1758 } 1759 1760 if (intel_power_well_is_enabled(dev_priv, cmn_d)) { 1761 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); 1762 unsigned int mask; 1763 1764 mask = status & DPLL_PORTD_READY_MASK; 1765 1766 if (mask == 0xf) 1767 mask = 0x0; 1768 else 1769 dev_priv->chv_phy_control |= 1770 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 1771 1772 dev_priv->chv_phy_control |= 1773 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 1774 1775 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 1776 1777 dev_priv->chv_phy_assert[DPIO_PHY1] = false; 1778 } else { 1779 dev_priv->chv_phy_assert[DPIO_PHY1] = true; 1780 } 1781 1782 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", 1783 dev_priv->chv_phy_control); 1784 1785 /* Defer application of initial phy_control to enabling the powerwell */ 1786 } 1787 1788 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 1789 { 1790 struct i915_power_well *cmn = 1791 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 1792 struct i915_power_well *disp2d = 1793 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 1794 1795 /* If the display might be already active skip this */ 1796 if (intel_power_well_is_enabled(dev_priv, cmn) && 1797 intel_power_well_is_enabled(dev_priv, disp2d) && 1798 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) 1799 return; 1800 1801 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); 1802 1803 /* cmnlane needs DPLL registers */ 1804 intel_power_well_enable(dev_priv, disp2d); 1805 1806 /* 1807 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 1808 * Need to assert and de-assert PHY SB reset by gating the 1809 * common lane power, then un-gating it. 1810 * Simply ungating isn't enough to reset the PHY enough to get 1811 * ports and lanes running. 
1812 */ 1813 intel_power_well_disable(dev_priv, cmn); 1814 } 1815 1816 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 1817 { 1818 bool ret; 1819 1820 vlv_punit_get(dev_priv); 1821 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 1822 vlv_punit_put(dev_priv); 1823 1824 return ret; 1825 } 1826 1827 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 1828 { 1829 drm_WARN(&dev_priv->drm, 1830 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 1831 "VED not power gated\n"); 1832 } 1833 1834 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 1835 { 1836 static const struct pci_device_id isp_ids[] = { 1837 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 1838 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 1839 {} 1840 }; 1841 1842 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && 1843 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 1844 "ISP not power gated\n"); 1845 } 1846 1847 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 1848 1849 /** 1850 * intel_power_domains_init_hw - initialize hardware power domain state 1851 * @i915: i915 device instance 1852 * @resume: Called from resume code paths or not 1853 * 1854 * This function initializes the hardware power domain state and enables all 1855 * power wells belonging to the INIT power domain. Power wells in other 1856 * domains (and not in the INIT domain) are referenced or disabled by 1857 * intel_modeset_readout_hw_state(). After that the reference count of each 1858 * power well must match its HW enabled state, see 1859 * intel_power_domains_verify_state(). 1860 * 1861 * It will return with power domains disabled (to be enabled later by 1862 * intel_power_domains_enable()) and must be paired with 1863 * intel_power_domains_driver_remove(). 1864 */ 1865 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) 1866 { 1867 struct i915_power_domains *power_domains = &i915->power_domains; 1868 1869 power_domains->initializing = true; 1870 1871 if (DISPLAY_VER(i915) >= 11) { 1872 icl_display_core_init(i915, resume); 1873 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 1874 bxt_display_core_init(i915, resume); 1875 } else if (DISPLAY_VER(i915) == 9) { 1876 skl_display_core_init(i915, resume); 1877 } else if (IS_CHERRYVIEW(i915)) { 1878 mutex_lock(&power_domains->lock); 1879 chv_phy_control_init(i915); 1880 mutex_unlock(&power_domains->lock); 1881 assert_isp_power_gated(i915); 1882 } else if (IS_VALLEYVIEW(i915)) { 1883 mutex_lock(&power_domains->lock); 1884 vlv_cmnlane_wa(i915); 1885 mutex_unlock(&power_domains->lock); 1886 assert_ved_power_gated(i915); 1887 assert_isp_power_gated(i915); 1888 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { 1889 hsw_assert_cdclk(i915); 1890 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 1891 } else if (IS_IVYBRIDGE(i915)) { 1892 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); 1893 } 1894 1895 /* 1896 * Keep all power wells enabled for any dependent HW access during 1897 * initialization and to make sure we keep BIOS enabled display HW 1898 * resources powered until display HW readout is complete. We drop 1899 * this reference in intel_power_domains_enable(). 1900 */ 1901 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 1902 power_domains->init_wakeref = 1903 intel_display_power_get(i915, POWER_DOMAIN_INIT); 1904 1905 /* Disable power support if the user asked so. 
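	 * i915.disable_power_well=0 means the user asked to never power down
	 * the display power wells: take an extra POWER_DOMAIN_INIT reference
	 * here, which is only dropped again in
	 * intel_power_domains_driver_remove() and intel_power_domains_suspend().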
*/ 1906 if (!i915->params.disable_power_well) { 1907 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); 1908 i915->power_domains.disable_wakeref = intel_display_power_get(i915, 1909 POWER_DOMAIN_INIT); 1910 } 1911 intel_power_domains_sync_hw(i915); 1912 1913 power_domains->initializing = false; 1914 } 1915 1916 /** 1917 * intel_power_domains_driver_remove - deinitialize hw power domain state 1918 * @i915: i915 device instance 1919 * 1920 * De-initializes the display power domain HW state. It also ensures that the 1921 * device stays powered up so that the driver can be reloaded. 1922 * 1923 * It must be called with power domains already disabled (after a call to 1924 * intel_power_domains_disable()) and must be paired with 1925 * intel_power_domains_init_hw(). 1926 */ 1927 void intel_power_domains_driver_remove(struct drm_i915_private *i915) 1928 { 1929 intel_wakeref_t wakeref __maybe_unused = 1930 fetch_and_zero(&i915->power_domains.init_wakeref); 1931 1932 /* Remove the refcount we took to keep power well support disabled. */ 1933 if (!i915->params.disable_power_well) 1934 intel_display_power_put(i915, POWER_DOMAIN_INIT, 1935 fetch_and_zero(&i915->power_domains.disable_wakeref)); 1936 1937 intel_display_power_flush_work_sync(i915); 1938 1939 intel_power_domains_verify_state(i915); 1940 1941 /* Keep the power well enabled, but cancel its rpm wakeref. */ 1942 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1943 } 1944 1945 /** 1946 * intel_power_domains_sanitize_state - sanitize power domains state 1947 * @i915: i915 device instance 1948 * 1949 * Sanitize the power domains state during driver loading and system resume. 1950 * The function will disable all display power wells that BIOS has enabled 1951 * without a user for them (any user for a power well has taken a reference 1952 * on it by the time this function is called, after the state of all the 1953 * pipe, encoder, etc. HW resources have been sanitized). 1954 */ 1955 void intel_power_domains_sanitize_state(struct drm_i915_private *i915) 1956 { 1957 struct i915_power_domains *power_domains = &i915->power_domains; 1958 struct i915_power_well *power_well; 1959 1960 mutex_lock(&power_domains->lock); 1961 1962 for_each_power_well_reverse(i915, power_well) { 1963 if (power_well->desc->always_on || power_well->count || 1964 !intel_power_well_is_enabled(i915, power_well)) 1965 continue; 1966 1967 drm_dbg_kms(&i915->drm, 1968 "BIOS left unused %s power well enabled, disabling it\n", 1969 intel_power_well_name(power_well)); 1970 intel_power_well_disable(i915, power_well); 1971 } 1972 1973 mutex_unlock(&power_domains->lock); 1974 } 1975 1976 /** 1977 * intel_power_domains_enable - enable toggling of display power wells 1978 * @i915: i915 device instance 1979 * 1980 * Enable the on-demand enabling/disabling of the display power wells. Note that 1981 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled 1982 * only at specific points of the display modeset sequence, thus they are not 1983 * affected by the intel_power_domains_enable()/disable() calls. The purpose 1984 * of this function is to keep the rest of power wells enabled until the end 1985 * of display HW readout (which will acquire the power references reflecting 1986 * the current HW state).
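 *
 * A minimal sketch of the intended ordering around driver load and removal
 * (illustrative only; the actual call sites live in the higher level
 * driver init/teardown paths):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	... display HW state readout takes its own power references ...
 *	intel_power_domains_enable(i915);
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_driver_remove(i915);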
1987 */ 1988 void intel_power_domains_enable(struct drm_i915_private *i915) 1989 { 1990 intel_wakeref_t wakeref __maybe_unused = 1991 fetch_and_zero(&i915->power_domains.init_wakeref); 1992 1993 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 1994 intel_power_domains_verify_state(i915); 1995 } 1996 1997 /** 1998 * intel_power_domains_disable - disable toggling of display power wells 1999 * @i915: i915 device instance 2000 * 2001 * Disable the on-demand enabling/disabling of the display power wells. See 2002 * intel_power_domains_enable() for which power wells this call controls. 2003 */ 2004 void intel_power_domains_disable(struct drm_i915_private *i915) 2005 { 2006 struct i915_power_domains *power_domains = &i915->power_domains; 2007 2008 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 2009 power_domains->init_wakeref = 2010 intel_display_power_get(i915, POWER_DOMAIN_INIT); 2011 2012 intel_power_domains_verify_state(i915); 2013 } 2014 2015 /** 2016 * intel_power_domains_suspend - suspend power domain state 2017 * @i915: i915 device instance 2018 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) 2019 * 2020 * This function prepares the hardware power domain state before entering 2021 * system suspend. 2022 * 2023 * It must be called with power domains already disabled (after a call to 2024 * intel_power_domains_disable()) and paired with intel_power_domains_resume(). 2025 */ 2026 void intel_power_domains_suspend(struct drm_i915_private *i915, 2027 enum i915_drm_suspend_mode suspend_mode) 2028 { 2029 struct i915_power_domains *power_domains = &i915->power_domains; 2030 intel_wakeref_t wakeref __maybe_unused = 2031 fetch_and_zero(&power_domains->init_wakeref); 2032 2033 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 2034 2035 /* 2036 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 2037 * support, don't manually deinit the power domains. This also means the 2038 * DMC firmware will stay active; it will power down any HW 2039 * resources as required and also enable deeper system power states 2040 * that would be blocked if the firmware was inactive. 2041 */ 2042 if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) && 2043 suspend_mode == I915_DRM_SUSPEND_IDLE && 2044 intel_dmc_has_payload(i915)) { 2045 intel_display_power_flush_work(i915); 2046 intel_power_domains_verify_state(i915); 2047 return; 2048 } 2049 2050 /* 2051 * Even if power well support was disabled we still want to disable 2052 * power wells if power domains must be deinitialized for suspend. 2053 */ 2054 if (!i915->params.disable_power_well) 2055 intel_display_power_put(i915, POWER_DOMAIN_INIT, 2056 fetch_and_zero(&i915->power_domains.disable_wakeref)); 2057 2058 intel_display_power_flush_work(i915); 2059 intel_power_domains_verify_state(i915); 2060 2061 if (DISPLAY_VER(i915) >= 11) 2062 icl_display_core_uninit(i915); 2063 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) 2064 bxt_display_core_uninit(i915); 2065 else if (DISPLAY_VER(i915) == 9) 2066 skl_display_core_uninit(i915); 2067 2068 power_domains->display_core_suspended = true; 2069 } 2070 2071 /** 2072 * intel_power_domains_resume - resume power domain state 2073 * @i915: i915 device instance 2074 * 2075 * This function resumes the hardware power domain state during system resume. 2076 * 2077 * It will return with power domain support disabled (to be enabled later by 2078 * intel_power_domains_enable()) and must be paired with 2079 * intel_power_domains_suspend().
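 *
 * Illustrative pairing with intel_power_domains_suspend() across a full
 * system suspend/resume cycle (a sketch, assuming the suspend path decided
 * to deinit the display core):
 *
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *	... system suspends and resumes ...
 *	intel_power_domains_resume(i915);
 *
 * If the display core was not deinitialized (the DC9-less DMC case handled
 * in intel_power_domains_suspend()), only the INIT power reference is
 * re-acquired here instead of re-running the full hardware init.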
2080 */ 2081 void intel_power_domains_resume(struct drm_i915_private *i915) 2082 { 2083 struct i915_power_domains *power_domains = &i915->power_domains; 2084 2085 if (power_domains->display_core_suspended) { 2086 intel_power_domains_init_hw(i915, true); 2087 power_domains->display_core_suspended = false; 2088 } else { 2089 drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 2090 power_domains->init_wakeref = 2091 intel_display_power_get(i915, POWER_DOMAIN_INIT); 2092 } 2093 2094 intel_power_domains_verify_state(i915); 2095 } 2096 2097 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2098 2099 static void intel_power_domains_dump_info(struct drm_i915_private *i915) 2100 { 2101 struct i915_power_domains *power_domains = &i915->power_domains; 2102 struct i915_power_well *power_well; 2103 2104 for_each_power_well(i915, power_well) { 2105 enum intel_display_power_domain domain; 2106 2107 drm_dbg(&i915->drm, "%-25s %d\n", 2108 intel_power_well_name(power_well), intel_power_well_refcount(power_well)); 2109 2110 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2111 drm_dbg(&i915->drm, " %-23s %d\n", 2112 intel_display_power_domain_str(domain), 2113 power_domains->domain_use_count[domain]); 2114 } 2115 } 2116 2117 /** 2118 * intel_power_domains_verify_state - verify the HW/SW state for all power wells 2119 * @i915: i915 device instance 2120 * 2121 * Verify if the reference count of each power well matches its HW enabled 2122 * state and the total refcount of the domains it belongs to. This must be 2123 * called after modeset HW state sanitization, which is responsible for 2124 * acquiring reference counts for any power wells in use and disabling the 2125 * ones left on by BIOS but not required by any active output. 2126 */ 2127 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2128 { 2129 struct i915_power_domains *power_domains = &i915->power_domains; 2130 struct i915_power_well *power_well; 2131 bool dump_domain_info; 2132 2133 mutex_lock(&power_domains->lock); 2134 2135 verify_async_put_domains_state(power_domains); 2136 2137 dump_domain_info = false; 2138 for_each_power_well(i915, power_well) { 2139 enum intel_display_power_domain domain; 2140 int domains_count; 2141 bool enabled; 2142 2143 enabled = intel_power_well_is_enabled(i915, power_well); 2144 if ((intel_power_well_refcount(power_well) || 2145 intel_power_well_is_always_on(power_well)) != 2146 enabled) 2147 drm_err(&i915->drm, 2148 "power well %s state mismatch (refcount %d/enabled %d)", 2149 intel_power_well_name(power_well), 2150 intel_power_well_refcount(power_well), enabled); 2151 2152 domains_count = 0; 2153 for_each_power_domain(domain, intel_power_well_domains(power_well)) 2154 domains_count += power_domains->domain_use_count[domain]; 2155 2156 if (intel_power_well_refcount(power_well) != domains_count) { 2157 drm_err(&i915->drm, 2158 "power well %s refcount/domain refcount mismatch " 2159 "(refcount %d/domains refcount %d)\n", 2160 intel_power_well_name(power_well), 2161 intel_power_well_refcount(power_well), 2162 domains_count); 2163 dump_domain_info = true; 2164 } 2165 } 2166 2167 if (dump_domain_info) { 2168 static bool dumped; 2169 2170 if (!dumped) { 2171 intel_power_domains_dump_info(i915); 2172 dumped = true; 2173 } 2174 } 2175 2176 mutex_unlock(&power_domains->lock); 2177 } 2178 2179 #else 2180 2181 static void intel_power_domains_verify_state(struct drm_i915_private *i915) 2182 { 2183 } 2184 2185 #endif 2186 2187 void intel_display_power_suspend_late(struct drm_i915_private 
*i915) 2188 { 2189 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 2190 IS_BROXTON(i915)) { 2191 bxt_enable_dc9(i915); 2192 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2193 hsw_enable_pc8(i915); 2194 } 2195 2196 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 2197 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 2198 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); 2199 } 2200 2201 void intel_display_power_resume_early(struct drm_i915_private *i915) 2202 { 2203 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || 2204 IS_BROXTON(i915)) { 2205 gen9_sanitize_dc_state(i915); 2206 bxt_disable_dc9(i915); 2207 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2208 hsw_disable_pc8(i915); 2209 } 2210 2211 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ 2212 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) 2213 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); 2214 } 2215 2216 void intel_display_power_suspend(struct drm_i915_private *i915) 2217 { 2218 if (DISPLAY_VER(i915) >= 11) { 2219 icl_display_core_uninit(i915); 2220 bxt_enable_dc9(i915); 2221 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 2222 bxt_display_core_uninit(i915); 2223 bxt_enable_dc9(i915); 2224 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2225 hsw_enable_pc8(i915); 2226 } 2227 } 2228 2229 void intel_display_power_resume(struct drm_i915_private *i915) 2230 { 2231 if (DISPLAY_VER(i915) >= 11) { 2232 bxt_disable_dc9(i915); 2233 icl_display_core_init(i915, true); 2234 if (intel_dmc_has_payload(i915)) { 2235 if (i915->dmc.allowed_dc_mask & 2236 DC_STATE_EN_UPTO_DC6) 2237 skl_enable_dc6(i915); 2238 else if (i915->dmc.allowed_dc_mask & 2239 DC_STATE_EN_UPTO_DC5) 2240 gen9_enable_dc5(i915); 2241 } 2242 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 2243 bxt_disable_dc9(i915); 2244 bxt_display_core_init(i915, true); 2245 if (intel_dmc_has_payload(i915) && 2246 (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2247 gen9_enable_dc5(i915); 2248 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2249 hsw_disable_pc8(i915); 2250 } 2251 } 2252 2253 void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) 2254 { 2255 struct i915_power_domains *power_domains = &i915->power_domains; 2256 int i; 2257 2258 mutex_lock(&power_domains->lock); 2259 2260 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2261 for (i = 0; i < power_domains->power_well_count; i++) { 2262 struct i915_power_well *power_well; 2263 enum intel_display_power_domain power_domain; 2264 2265 power_well = &power_domains->power_wells[i]; 2266 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well), 2267 intel_power_well_refcount(power_well)); 2268 2269 for_each_power_domain(power_domain, intel_power_well_domains(power_well)) 2270 seq_printf(m, " %-23s %d\n", 2271 intel_display_power_domain_str(power_domain), 2272 power_domains->domain_use_count[power_domain]); 2273 } 2274 2275 mutex_unlock(&power_domains->lock); 2276 } 2277 2278 struct intel_ddi_port_domains { 2279 enum port port_start; 2280 enum port port_end; 2281 enum aux_ch aux_ch_start; 2282 enum aux_ch aux_ch_end; 2283 2284 enum intel_display_power_domain ddi_lanes; 2285 enum intel_display_power_domain ddi_io; 2286 enum intel_display_power_domain aux_legacy_usbc; 2287 enum intel_display_power_domain aux_tbt; 2288 }; 2289 2290 static const struct intel_ddi_port_domains 2291 i9xx_port_domains[] = { 2292 { 2293 .port_start = PORT_A, 2294 .port_end = 
PORT_F, 2295 .aux_ch_start = AUX_CH_A, 2296 .aux_ch_end = AUX_CH_F, 2297 2298 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2299 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2300 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2301 .aux_tbt = POWER_DOMAIN_INVALID, 2302 }, 2303 }; 2304 2305 static const struct intel_ddi_port_domains 2306 d11_port_domains[] = { 2307 { 2308 .port_start = PORT_A, 2309 .port_end = PORT_B, 2310 .aux_ch_start = AUX_CH_A, 2311 .aux_ch_end = AUX_CH_B, 2312 2313 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2314 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2315 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2316 .aux_tbt = POWER_DOMAIN_INVALID, 2317 }, { 2318 .port_start = PORT_C, 2319 .port_end = PORT_F, 2320 .aux_ch_start = AUX_CH_C, 2321 .aux_ch_end = AUX_CH_F, 2322 2323 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C, 2324 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C, 2325 .aux_legacy_usbc = POWER_DOMAIN_AUX_C, 2326 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2327 }, 2328 }; 2329 2330 static const struct intel_ddi_port_domains 2331 d12_port_domains[] = { 2332 { 2333 .port_start = PORT_A, 2334 .port_end = PORT_C, 2335 .aux_ch_start = AUX_CH_A, 2336 .aux_ch_end = AUX_CH_C, 2337 2338 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2339 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2340 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2341 .aux_tbt = POWER_DOMAIN_INVALID, 2342 }, { 2343 .port_start = PORT_TC1, 2344 .port_end = PORT_TC6, 2345 .aux_ch_start = AUX_CH_USBC1, 2346 .aux_ch_end = AUX_CH_USBC6, 2347 2348 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, 2349 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, 2350 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, 2351 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2352 }, 2353 }; 2354 2355 static const struct intel_ddi_port_domains 2356 d13_port_domains[] = { 2357 { 2358 .port_start = PORT_A, 2359 .port_end = PORT_C, 2360 .aux_ch_start = AUX_CH_A, 2361 .aux_ch_end = AUX_CH_C, 2362 2363 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A, 2364 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A, 2365 .aux_legacy_usbc = POWER_DOMAIN_AUX_A, 2366 .aux_tbt = POWER_DOMAIN_INVALID, 2367 }, { 2368 .port_start = PORT_TC1, 2369 .port_end = PORT_TC4, 2370 .aux_ch_start = AUX_CH_USBC1, 2371 .aux_ch_end = AUX_CH_USBC4, 2372 2373 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1, 2374 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1, 2375 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1, 2376 .aux_tbt = POWER_DOMAIN_AUX_TBT1, 2377 }, { 2378 .port_start = PORT_D_XELPD, 2379 .port_end = PORT_E_XELPD, 2380 .aux_ch_start = AUX_CH_D_XELPD, 2381 .aux_ch_end = AUX_CH_E_XELPD, 2382 2383 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D, 2384 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D, 2385 .aux_legacy_usbc = POWER_DOMAIN_AUX_D, 2386 .aux_tbt = POWER_DOMAIN_INVALID, 2387 }, 2388 }; 2389 2390 static void 2391 intel_port_domains_for_platform(struct drm_i915_private *i915, 2392 const struct intel_ddi_port_domains **domains, 2393 int *domains_size) 2394 { 2395 if (DISPLAY_VER(i915) >= 13) { 2396 *domains = d13_port_domains; 2397 *domains_size = ARRAY_SIZE(d13_port_domains); 2398 } else if (DISPLAY_VER(i915) >= 12) { 2399 *domains = d12_port_domains; 2400 *domains_size = ARRAY_SIZE(d12_port_domains); 2401 } else if (DISPLAY_VER(i915) >= 11) { 2402 *domains = d11_port_domains; 2403 *domains_size = ARRAY_SIZE(d11_port_domains); 2404 } else { 2405 *domains = i9xx_port_domains; 2406 *domains_size = ARRAY_SIZE(i9xx_port_domains); 2407 } 2408 } 2409 2410 static const struct intel_ddi_port_domains * 2411 intel_port_domains_for_port(struct drm_i915_private *i915, enum port port) 2412 { 2413 const struct 
intel_ddi_port_domains *domains; 2414 int domains_size; 2415 int i; 2416 2417 intel_port_domains_for_platform(i915, &domains, &domains_size); 2418 for (i = 0; i < domains_size; i++) 2419 if (port >= domains[i].port_start && port <= domains[i].port_end) 2420 return &domains[i]; 2421 2422 return NULL; 2423 } 2424 2425 enum intel_display_power_domain 2426 intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port) 2427 { 2428 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2429 2430 if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID) 2431 return POWER_DOMAIN_PORT_DDI_IO_A; 2432 2433 return domains->ddi_io + (int)(port - domains->port_start); 2434 } 2435 2436 enum intel_display_power_domain 2437 intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port) 2438 { 2439 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port); 2440 2441 if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID) 2442 return POWER_DOMAIN_PORT_DDI_LANES_A; 2443 2444 return domains->ddi_lanes + (int)(port - domains->port_start); 2445 } 2446 2447 static const struct intel_ddi_port_domains * 2448 intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch) 2449 { 2450 const struct intel_ddi_port_domains *domains; 2451 int domains_size; 2452 int i; 2453 2454 intel_port_domains_for_platform(i915, &domains, &domains_size); 2455 for (i = 0; i < domains_size; i++) 2456 if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end) 2457 return &domains[i]; 2458 2459 return NULL; 2460 } 2461 2462 enum intel_display_power_domain 2463 intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2464 { 2465 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2466 2467 if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID) 2468 return POWER_DOMAIN_AUX_A; 2469 2470 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start); 2471 } 2472 2473 enum intel_display_power_domain 2474 intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch) 2475 { 2476 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch); 2477 2478 if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID) 2479 return POWER_DOMAIN_AUX_TBT1; 2480 2481 return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start); 2482 } 2483
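/*
 * The helpers above derive a power domain from a port or AUX channel by
 * offsetting from the first entry of the matching platform range. A worked
 * example (illustrative, assuming a display version 12 platform, i.e.
 * d12_port_domains): for PORT_TC3,
 *
 *	domains->ddi_io + (int)(PORT_TC3 - PORT_TC1)
 *		== POWER_DOMAIN_PORT_DDI_IO_TC1 + 2
 *		== POWER_DOMAIN_PORT_DDI_IO_TC3
 *
 * This relies on the POWER_DOMAIN_* values of a given range being declared
 * consecutively in enum intel_display_power_domain.
 */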