/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides more link rates.
 * Below we list only the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
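/*
 * For reference (a sanity check, not code the driver executes): decoding
 * the fixed point m2 above, 0x819999a >> 22 = 32 (m2_int) and
 * 0x819999a & ((1 << 22) - 1) = 1677722 (m2_fraction), i.e.
 * m2 = 32 + 1677722 / 2^22 ~= 32.4, matching the annotations on the
 * 162000 entry.
 */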
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single pipe configuration, the HW can support a maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}
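/*
 * A worked example (illustrative only): a sink reporting
 * DP_MAX_LINK_RATE = 0x14 (HBR2) gives max_rate = 540000, so the loop
 * above fills sink_rates with {162000, 270000, 540000} and sets
 * num_sink_rates = 3, assuming no lower LTTPR cap applies.
 */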
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
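/*
 * Units check, as an illustration: both helpers above work in kBytes/s.
 * E.g. an HBR2 x4 link gives 540000 * 4 = 2160000 kB/s, while a
 * 533250 kHz mode at 24 bpp needs DIV_ROUND_UP(533250 * 24, 8) =
 * 1599750 kB/s, so that mode fits on that link.
 */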
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	return INTEL_GEN(dev_priv) >= 12 ||
		(INTEL_GEN(dev_priv) == 11 &&
		 encoder->port != PORT_A);
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
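/*
 * intersect_rates() is a textbook merge-style intersection of two sorted
 * arrays. For example (illustrative values only), intersecting source
 * rates {162000, 270000, 540000} with sink rates {162000, 270000, 324000}
 * yields common rates {162000, 270000} and returns k = 2.
 */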
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
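/*
 * As a rough worked example (not executed anywhere): a 148500 kHz mode
 * becomes 148500 * 1000000 / 972261 ~= 152736 kHz once the ~2.85% FEC
 * overhead (1 / 0.972261) is folded in.
 */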
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
	 * (LinkSymbolClock) * 8 * (TimeSlotsPerMTP);
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;

	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format, so multiply by 16; on Gen 11
	 * the fractional part is 0.
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
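/*
 * Worked example for the slice count search (illustrative numbers): a
 * 3840 pixel wide mode with a 533250 KPix/s pixel rate is below
 * DP_DSC_PEAK_PIXEL_RATE, so min_slice_count = DIV_ROUND_UP(533250,
 * 340000) = 2. If the sink reports a 1920 pixel max slice width,
 * DIV_ROUND_UP(3840, 1920) = 2 keeps the minimum at 2, and the loop
 * returns 2 provided the sink supports at least 2 slices.
 */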
static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.rgb_to_ycbcr &&
	    intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * The bpp value assumes the RGB format. For the YCbCr 4:2:0 output
	 * format the number of bits per pixel is half that of RGB.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps */
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
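/*
 * Unit sanity check for the FRL comparison above (illustration only):
 * target_bw = bpp (bits) * target_clock (kHz) is in Kbits/s, and
 * pcon_max_frl_bw (Gbps) * 1000000 is also Kbits/s, so both sides of
 * the comparison really are in the same unit.
 */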
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
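/*
 * The AUX data registers hold up to four bytes, MSB first. As an
 * illustration, intel_dp_pack_aux((const u8 []){ 0x12, 0x34 }, 2)
 * returns 0x12340000, and intel_dp_unpack_aux(0x12340000, dst, 2)
 * recovers the original two bytes.
 */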
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
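/*
 * Usage sketch for the macro above (not a real call site):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... access PPS registers ...
 *	}
 *
 * The body runs exactly once: pps_lock() returns a non-zero wakeref,
 * and pps_unlock() returns 0, which terminates the for loop while
 * dropping both the mutex and the power domain reference.
 */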
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
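/*
 * Example of the bitmask logic above (illustrative): if another eDP port
 * already owns pipe B's power sequencer, pipes ends up as (1 << PIPE_A)
 * and ffs(1 << PIPE_A) - 1 = PIPE_A is returned; if both sequencers are
 * claimed, pipes == 0 and INVALID_PIPE is returned.
 */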
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
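/*
 * Note on the two checks above (general eDP behaviour, not i915
 * specific): PP_ON means the sequencer has completed the full panel
 * power-up, while EDP_FORCE_VDD only powers the panel logic enough for
 * AUX transactions. AUX access is therefore expected to work when
 * either is set, which is what intel_dp_check_edp() below verifies.
 */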
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based on the hrawclk, and we want to run the
	 * AUX bit clock at 2 MHz. So take the hrawclk value, divide by 2000,
	 * and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based on the cdclk or PCH rawclk, and we want
	 * to run the AUX bit clock at 2 MHz. So take the cdclk or PCH rawclk
	 * value, divide by 2000, and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
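/*
 * Worked example (illustrative): with a 24 MHz raw clock, rawclk_freq is
 * 24000 (kHz), so g4x_get_aux_clock_divider() returns
 * DIV_ROUND_CLOSEST(24000, 2000) = 12, i.e. the divider that yields the
 * desired ~2 MHz AUX bit clock.
 */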
static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * DP AUX is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going
	 * into deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * 400us delay required for errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration.
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these.
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
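/*
 * Header layout example (illustrative): a 16 byte native read of DPCD
 * address 0x00000 (request DP_AUX_NATIVE_READ = 0x9) packs as
 * txbuf = { 0x90, 0x00, 0x00, 0x0f }: 4 bits of request, 20 bits of
 * address, then the transfer length minus one.
 */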
1633 */ 1634 if (recv_bytes == 0 || recv_bytes > 20) { 1635 drm_dbg_kms(&i915->drm, 1636 "%s: Forbidden recv_bytes = %d on aux transaction\n", 1637 intel_dp->aux.name, recv_bytes); 1638 ret = -EBUSY; 1639 goto out; 1640 } 1641 1642 if (recv_bytes > recv_size) 1643 recv_bytes = recv_size; 1644 1645 for (i = 0; i < recv_bytes; i += 4) 1646 intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]), 1647 recv + i, recv_bytes - i); 1648 1649 ret = recv_bytes; 1650 out: 1651 cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); 1652 1653 if (vdd) 1654 edp_panel_vdd_off(intel_dp, false); 1655 1656 pps_unlock(intel_dp, pps_wakeref); 1657 intel_display_power_put_async(i915, aux_domain, aux_wakeref); 1658 1659 if (is_tc_port) 1660 intel_tc_port_unlock(dig_port); 1661 1662 return ret; 1663 } 1664 1665 #define BARE_ADDRESS_SIZE 3 1666 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) 1667 1668 static void 1669 intel_dp_aux_header(u8 txbuf[HEADER_SIZE], 1670 const struct drm_dp_aux_msg *msg) 1671 { 1672 txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf); 1673 txbuf[1] = (msg->address >> 8) & 0xff; 1674 txbuf[2] = msg->address & 0xff; 1675 txbuf[3] = msg->size - 1; 1676 } 1677 1678 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg) 1679 { 1680 /* 1681 * If we're trying to send the HDCP Aksv, we need to set a the Aksv 1682 * select bit to inform the hardware to send the Aksv after our header 1683 * since we can't access that data from software. 1684 */ 1685 if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE && 1686 msg->address == DP_AUX_HDCP_AKSV) 1687 return DP_AUX_CH_CTL_AUX_AKSV_SELECT; 1688 1689 return 0; 1690 } 1691 1692 static ssize_t 1693 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 1694 { 1695 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); 1696 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1697 u8 txbuf[20], rxbuf[20]; 1698 size_t txsize, rxsize; 1699 u32 flags = intel_dp_aux_xfer_flags(msg); 1700 int ret; 1701 1702 intel_dp_aux_header(txbuf, msg); 1703 1704 switch (msg->request & ~DP_AUX_I2C_MOT) { 1705 case DP_AUX_NATIVE_WRITE: 1706 case DP_AUX_I2C_WRITE: 1707 case DP_AUX_I2C_WRITE_STATUS_UPDATE: 1708 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; 1709 rxsize = 2; /* 0 or 1 data bytes */ 1710 1711 if (drm_WARN_ON(&i915->drm, txsize > 20)) 1712 return -E2BIG; 1713 1714 drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size); 1715 1716 if (msg->buffer) 1717 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); 1718 1719 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, 1720 rxbuf, rxsize, flags); 1721 if (ret > 0) { 1722 msg->reply = rxbuf[0] >> 4; 1723 1724 if (ret > 1) { 1725 /* Number of bytes written in a short write. */ 1726 ret = clamp_t(int, rxbuf[1], 0, msg->size); 1727 } else { 1728 /* Return payload size. */ 1729 ret = msg->size; 1730 } 1731 } 1732 break; 1733 1734 case DP_AUX_NATIVE_READ: 1735 case DP_AUX_I2C_READ: 1736 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; 1737 rxsize = msg->size + 1; 1738 1739 if (drm_WARN_ON(&i915->drm, rxsize > 20)) 1740 return -E2BIG; 1741 1742 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, 1743 rxbuf, rxsize, flags); 1744 if (ret > 0) { 1745 msg->reply = rxbuf[0] >> 4; 1746 /* 1747 * Assume happy day, and copy the data. The caller is 1748 * expected to check msg->reply before touching it. 1749 * 1750 * Return payload size. 
1751 */ 1752 ret--; 1753 memcpy(msg->buffer, rxbuf + 1, ret); 1754 } 1755 break; 1756 1757 default: 1758 ret = -EINVAL; 1759 break; 1760 } 1761 1762 return ret; 1763 } 1764 1765 1766 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 1767 { 1768 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1769 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1770 enum aux_ch aux_ch = dig_port->aux_ch; 1771 1772 switch (aux_ch) { 1773 case AUX_CH_B: 1774 case AUX_CH_C: 1775 case AUX_CH_D: 1776 return DP_AUX_CH_CTL(aux_ch); 1777 default: 1778 MISSING_CASE(aux_ch); 1779 return DP_AUX_CH_CTL(AUX_CH_B); 1780 } 1781 } 1782 1783 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) 1784 { 1785 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1786 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1787 enum aux_ch aux_ch = dig_port->aux_ch; 1788 1789 switch (aux_ch) { 1790 case AUX_CH_B: 1791 case AUX_CH_C: 1792 case AUX_CH_D: 1793 return DP_AUX_CH_DATA(aux_ch, index); 1794 default: 1795 MISSING_CASE(aux_ch); 1796 return DP_AUX_CH_DATA(AUX_CH_B, index); 1797 } 1798 } 1799 1800 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 1801 { 1802 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1803 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1804 enum aux_ch aux_ch = dig_port->aux_ch; 1805 1806 switch (aux_ch) { 1807 case AUX_CH_A: 1808 return DP_AUX_CH_CTL(aux_ch); 1809 case AUX_CH_B: 1810 case AUX_CH_C: 1811 case AUX_CH_D: 1812 return PCH_DP_AUX_CH_CTL(aux_ch); 1813 default: 1814 MISSING_CASE(aux_ch); 1815 return DP_AUX_CH_CTL(AUX_CH_A); 1816 } 1817 } 1818 1819 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 1820 { 1821 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1822 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1823 enum aux_ch aux_ch = dig_port->aux_ch; 1824 1825 switch (aux_ch) { 1826 case AUX_CH_A: 1827 return DP_AUX_CH_DATA(aux_ch, index); 1828 case AUX_CH_B: 1829 case AUX_CH_C: 1830 case AUX_CH_D: 1831 return PCH_DP_AUX_CH_DATA(aux_ch, index); 1832 default: 1833 MISSING_CASE(aux_ch); 1834 return DP_AUX_CH_DATA(AUX_CH_A, index); 1835 } 1836 } 1837 1838 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 1839 { 1840 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1841 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1842 enum aux_ch aux_ch = dig_port->aux_ch; 1843 1844 switch (aux_ch) { 1845 case AUX_CH_A: 1846 case AUX_CH_B: 1847 case AUX_CH_C: 1848 case AUX_CH_D: 1849 case AUX_CH_E: 1850 case AUX_CH_F: 1851 return DP_AUX_CH_CTL(aux_ch); 1852 default: 1853 MISSING_CASE(aux_ch); 1854 return DP_AUX_CH_CTL(AUX_CH_A); 1855 } 1856 } 1857 1858 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 1859 { 1860 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1861 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1862 enum aux_ch aux_ch = dig_port->aux_ch; 1863 1864 switch (aux_ch) { 1865 case AUX_CH_A: 1866 case AUX_CH_B: 1867 case AUX_CH_C: 1868 case AUX_CH_D: 1869 case AUX_CH_E: 1870 case AUX_CH_F: 1871 return DP_AUX_CH_DATA(aux_ch, index); 1872 default: 1873 MISSING_CASE(aux_ch); 1874 return DP_AUX_CH_DATA(AUX_CH_A, index); 1875 } 1876 } 1877 1878 static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp) 1879 { 1880 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1881 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1882 enum aux_ch aux_ch = 
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
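/*
 * For reference, drm_dp_link_rate_to_bw_code() simply divides the link
 * rate in kHz by 27000, so e.g. 162000 -> 0x06 (DP_LINK_BW_1_62),
 * 270000 -> 0x0a, 540000 -> 0x14 and 810000 -> 0x1e.
 */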
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{
	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}
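/*
 * As a worked example (the numbers are illustrative only): a 148500 kHz
 * 1080p mode at 12 bpc needs 148500 * 12 / 8 = 222750 kHz of TMDS clock,
 * and with YCbCr 4:2:0 output that halves to 111375 kHz, which is then
 * checked against the DFP's min/max TMDS clock limits above.
 */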
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from VBT only for panels that don't have bpp in EDID */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
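/*
 * The comparison above is done in units of kBytes/sec:
 * intel_dp_link_required() is pixel_clock * bpp / 8, while
 * intel_dp_max_data_rate() (defined earlier in this file) works out to
 * link_clock * lane_count in matching units, the 8b/10b overhead being
 * folded into how link rates are specified. As an illustrative example,
 * a 148500 kHz mode at 24 bpp needs 148500 * 24 / 8 = 445500 kB/s,
 * which fits in the 270000 * 4 = 1080000 kB/s offered by 4 lanes of HBR.
 */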
/* Optimize link config in order: max bpp, min lanes, min clock */
static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}
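/*
 * drm_dp_dsc_sink_supported_input_bpcs() fills dsc_bpc[] in descending
 * order, so the loop above picks the highest sink-supported input bpc
 * that does not exceed dsc_max_bpc. E.g. (illustrative values) a sink
 * advertising {12, 10, 8} with dsc_max_bpc == 10 yields 10 bpc, i.e. a
 * pipe_bpp of 30.
 */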
#define DSC_SUPPORTED_VERSION_MIN	1

static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}

static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
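	/*
	 * Both drm_edp_dsc_sink_output_bpp() and dsc_max_output_bpp above
	 * are understood to be in 1/16 bpp units (U6.4 fixed point), hence
	 * the >> 4 before comparing against the integer pipe_bpp; e.g. an
	 * illustrative raw value of 0x120 (288) corresponds to 18 bpp.
	 */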
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
	     adjusted_mode->crtc_hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp))
		pipe_config->bigjoiner = true;

	if (intel_dp_is_edp(intel_dp))
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we fall back to the max clock and lane count for eDP
		 * panels that fail with the fast optimal settings (see
		 * intel_dp->use_max_params), in which case the fast vs. wide
		 * choice doesn't matter.
		 */
		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
	else
		/* Optimize for slow and wide. */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}

	return 0;
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}
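/*
 * In practice this means that with the "Broadcast RGB" property left on
 * Auto, CEA modes (e.g. 1920x1080@60) default to limited range RGB per
 * the CEA-861 defaults referenced above, while typical non-CEA PC modes
 * (e.g. 2560x1440) stay full range, as does 6 bpc output (pipe_bpp 18).
 */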
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;

	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;
	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
					 &crtc_state->infoframes.vsc);
}

void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	vsc->sdp_type = DP_SDP_VSC;

	if (dev_priv->psr.psr2_enabled) {
		if (dev_priv->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
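/*
 * Summary of the VSC SDP header variants used above:
 *
 *	revision 0x2, length 0x08: PSR1 (eDP 1.3+)
 *	revision 0x4, length 0x0e: PSR2 + Y-coordinate (eDP 1.4)
 *	revision 0x5, length 0x13: Pixel Encoding/Colorimetry (DP 1.4a)
 */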
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

static void
intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
			     struct intel_crtc_state *pipe_config,
			     int output_bpp, bool constant_n)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * DRRS and PSR can't be enabled together, so give preference to PSR
	 * as it allows more power savings by completely shutting down the
	 * display. To guarantee this, intel_dp_drrs_compute_config() must be
	 * called after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return;

	if (!intel_connector->panel.downclock_mode ||
	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	pipe_config->has_drrs = true;
	intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
			       intel_connector->panel.downclock_mode->clock,
			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
			       constant_n, pipe_config->fec_enable);
}
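/*
 * intel_link_compute_m_n() encodes the ratio of stream rate to link
 * rate. As a rough illustrative example, 24 bpp at a 148500 kHz
 * downclocked dot clock over 4 lanes of HBR gives a data M/N ratio of
 * about 148500 * 24 / (8 * 4 * 270000) ~= 0.4125 (before FEC overhead,
 * if enabled).
 */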
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp,
				 pipe_config->port_clock,
				 pipe_config->lane_count);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* Take the difference of current time and panel power off time,
	 * and then make the panel wait for T11/T12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
					       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */
static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
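/*
 * On the pre-DDI platforms where the check above applies, PANEL_UNLOCK_REGS
 * is the register write-protect key in the high bits of PP_CONTROL;
 * without restoring it, subsequent PPS register writes would not take
 * effect.
 */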
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref);
	intel_dp->vdd_wakeref = intel_display_power_get(dev_priv,
							intel_aux_power_domain(dig_port));

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->vdd_wakeref));
}

static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
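/*
 * For a sense of scale (illustrative numbers only): with a fairly
 * typical eDP power cycle delay of 500ms, the 5x factor above schedules
 * the VDD off work ~2.5s out, so a burst of back-to-back AUX
 * transactions doesn't keep toggling VDD. The actual delay comes from
 * the VBT/panel.
 */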
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}

static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work.
	 */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dig_port),
				fetch_and_zero(&intel_dp->vdd_wakeref));
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}

/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}

/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}
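/*
 * The backlight_on_delay/backlight_off_delay waits used above correspond
 * to the eDP panel power sequencing T8 and T9 delays as programmed from
 * the VBT/panel, analogous to the T11/T12 handling for panel power
 * itself.
 */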
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	if (is_enabled == enable)
		return;

	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
		    enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}

static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
Program DP PLL enable 3611 */ 3612 if (IS_GEN(dev_priv, 5)) 3613 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3614 3615 intel_dp->DP |= DP_PLL_ENABLE; 3616 3617 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3618 intel_de_posting_read(dev_priv, DP_A); 3619 udelay(200); 3620 } 3621 3622 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3623 const struct intel_crtc_state *old_crtc_state) 3624 { 3625 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3626 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3627 3628 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3629 assert_dp_port_disabled(intel_dp); 3630 assert_edp_pll_enabled(dev_priv); 3631 3632 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3633 3634 intel_dp->DP &= ~DP_PLL_ENABLE; 3635 3636 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3637 intel_de_posting_read(dev_priv, DP_A); 3638 udelay(200); 3639 } 3640 3641 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3642 { 3643 /* 3644 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3645 * be capable of signalling downstream hpd with a long pulse. 3646 * Whether or not that means D3 is safe to use is not clear, 3647 * but let's assume so until proven otherwise. 3648 * 3649 * FIXME should really check all downstream ports... 3650 */ 3651 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3652 drm_dp_is_branch(intel_dp->dpcd) && 3653 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3654 } 3655 3656 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3657 const struct intel_crtc_state *crtc_state, 3658 bool enable) 3659 { 3660 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3661 int ret; 3662 3663 if (!crtc_state->dsc.compression_enable) 3664 return; 3665 3666 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3667 enable ? DP_DECOMPRESSION_EN : 0); 3668 if (ret < 0) 3669 drm_dbg_kms(&i915->drm, 3670 "Failed to %s sink decompression state\n", 3671 enable ? 
"enable" : "disable"); 3672 } 3673 3674 static void 3675 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) 3676 { 3677 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3678 u8 oui[] = { 0x00, 0xaa, 0x01 }; 3679 u8 buf[3] = { 0 }; 3680 3681 /* 3682 * During driver init, we want to be careful and avoid changing the source OUI if it's 3683 * already set to what we want, so as to avoid clearing any state by accident 3684 */ 3685 if (careful) { 3686 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) 3687 drm_err(&i915->drm, "Failed to read source OUI\n"); 3688 3689 if (memcmp(oui, buf, sizeof(oui)) == 0) 3690 return; 3691 } 3692 3693 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) 3694 drm_err(&i915->drm, "Failed to write source OUI\n"); 3695 } 3696 3697 /* If the device supports it, try to set the power state appropriately */ 3698 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 3699 { 3700 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3701 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3702 int ret, i; 3703 3704 /* Should have a valid DPCD by this point */ 3705 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3706 return; 3707 3708 if (mode != DP_SET_POWER_D0) { 3709 if (downstream_hpd_needs_d0(intel_dp)) 3710 return; 3711 3712 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3713 } else { 3714 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3715 3716 lspcon_resume(dp_to_dig_port(intel_dp)); 3717 3718 /* Write the source OUI as early as possible */ 3719 if (intel_dp_is_edp(intel_dp)) 3720 intel_edp_init_source_oui(intel_dp, false); 3721 3722 /* 3723 * When turning on, we need to retry for 1ms to give the sink 3724 * time to wake up. 3725 */ 3726 for (i = 0; i < 3; i++) { 3727 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3728 if (ret == 1) 3729 break; 3730 msleep(1); 3731 } 3732 3733 if (ret == 1 && lspcon->active) 3734 lspcon_wait_pcon_mode(lspcon); 3735 } 3736 3737 if (ret != 1) 3738 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", 3739 encoder->base.base.id, encoder->base.name, 3740 mode == DP_SET_POWER_D0 ? 
"D0" : "D3"); 3741 } 3742 3743 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3744 enum port port, enum pipe *pipe) 3745 { 3746 enum pipe p; 3747 3748 for_each_pipe(dev_priv, p) { 3749 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3750 3751 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3752 *pipe = p; 3753 return true; 3754 } 3755 } 3756 3757 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3758 port_name(port)); 3759 3760 /* must initialize pipe to something for the asserts */ 3761 *pipe = PIPE_A; 3762 3763 return false; 3764 } 3765 3766 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3767 i915_reg_t dp_reg, enum port port, 3768 enum pipe *pipe) 3769 { 3770 bool ret; 3771 u32 val; 3772 3773 val = intel_de_read(dev_priv, dp_reg); 3774 3775 ret = val & DP_PORT_EN; 3776 3777 /* asserts want to know the pipe even if the port is disabled */ 3778 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3779 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3780 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3781 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3782 else if (IS_CHERRYVIEW(dev_priv)) 3783 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3784 else 3785 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3786 3787 return ret; 3788 } 3789 3790 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3791 enum pipe *pipe) 3792 { 3793 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3794 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3795 intel_wakeref_t wakeref; 3796 bool ret; 3797 3798 wakeref = intel_display_power_get_if_enabled(dev_priv, 3799 encoder->power_domain); 3800 if (!wakeref) 3801 return false; 3802 3803 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3804 encoder->port, pipe); 3805 3806 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3807 3808 return ret; 3809 } 3810 3811 static void intel_dp_get_config(struct intel_encoder *encoder, 3812 struct intel_crtc_state *pipe_config) 3813 { 3814 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3815 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3816 u32 tmp, flags = 0; 3817 enum port port = encoder->port; 3818 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3819 3820 if (encoder->type == INTEL_OUTPUT_EDP) 3821 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3822 else 3823 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3824 3825 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3826 3827 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3828 3829 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3830 u32 trans_dp = intel_de_read(dev_priv, 3831 TRANS_DP_CTL(crtc->pipe)); 3832 3833 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3834 flags |= DRM_MODE_FLAG_PHSYNC; 3835 else 3836 flags |= DRM_MODE_FLAG_NHSYNC; 3837 3838 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3839 flags |= DRM_MODE_FLAG_PVSYNC; 3840 else 3841 flags |= DRM_MODE_FLAG_NVSYNC; 3842 } else { 3843 if (tmp & DP_SYNC_HS_HIGH) 3844 flags |= DRM_MODE_FLAG_PHSYNC; 3845 else 3846 flags |= DRM_MODE_FLAG_NHSYNC; 3847 3848 if (tmp & DP_SYNC_VS_HIGH) 3849 flags |= DRM_MODE_FLAG_PVSYNC; 3850 else 3851 flags |= DRM_MODE_FLAG_NVSYNC; 3852 } 3853 3854 pipe_config->hw.adjusted_mode.flags |= flags; 3855 3856 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3857 pipe_config->limited_color_range = true; 3858 3859 pipe_config->lane_count = 3860 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3861 3862 intel_dp_get_m_n(crtc, pipe_config); 3863 3864 if (port == PORT_A) { 3865 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3866 pipe_config->port_clock = 162000; 3867 else 3868 pipe_config->port_clock = 270000; 3869 } 3870 3871 pipe_config->hw.adjusted_mode.crtc_clock = 3872 intel_dotclock_calculate(pipe_config->port_clock, 3873 &pipe_config->dp_m_n); 3874 3875 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3876 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3877 /* 3878 * This is a big fat ugly hack. 3879 * 3880 * Some machines in UEFI boot mode provide us a VBT that has 18 3881 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3882 * unknown we fail to light up. Yet the same BIOS boots up with 3883 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3884 * max, not what it tells us to use. 3885 * 3886 * Note: This will still be broken if the eDP panel is not lit 3887 * up by the BIOS, and thus we can't get the mode at module 3888 * load. 3889 */ 3890 drm_dbg_kms(&dev_priv->drm, 3891 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3892 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3893 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3894 } 3895 } 3896 3897 static bool 3898 intel_dp_get_dpcd(struct intel_dp *intel_dp); 3899 3900 /** 3901 * intel_dp_sync_state - sync the encoder state during init/resume 3902 * @encoder: intel encoder to sync 3903 * @crtc_state: state for the CRTC connected to the encoder 3904 * 3905 * Sync any state stored in the encoder wrt. HW state during driver init 3906 * and system resume. 3907 */ 3908 void intel_dp_sync_state(struct intel_encoder *encoder, 3909 const struct intel_crtc_state *crtc_state) 3910 { 3911 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3912 3913 /* 3914 * Don't clobber DPCD if it's been already read out during output 3915 * setup (eDP) or detect. 3916 */ 3917 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 3918 intel_dp_get_dpcd(intel_dp); 3919 3920 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 3921 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 3922 } 3923 3924 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, 3925 struct intel_crtc_state *crtc_state) 3926 { 3927 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3928 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3929 3930 /* 3931 * If BIOS has set an unsupported or non-standard link rate for some 3932 * reason force an encoder recompute and full modeset. 3933 */ 3934 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates, 3935 crtc_state->port_clock) < 0) { 3936 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n"); 3937 crtc_state->uapi.connectors_changed = true; 3938 return false; 3939 } 3940 3941 /* 3942 * FIXME hack to force full modeset when DSC is being used. 3943 * 3944 * As long as we do not have full state readout and config comparison 3945 * of crtc_state->dsc, we have no way to ensure reliable fastset. 3946 * Remove once we have readout for DSC. 
3947 */ 3948 if (crtc_state->dsc.compression_enable) { 3949 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n"); 3950 crtc_state->uapi.mode_changed = true; 3951 return false; 3952 } 3953 3954 if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) { 3955 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n"); 3956 crtc_state->uapi.mode_changed = true; 3957 return false; 3958 } 3959 3960 return true; 3961 } 3962 3963 static void intel_disable_dp(struct intel_atomic_state *state, 3964 struct intel_encoder *encoder, 3965 const struct intel_crtc_state *old_crtc_state, 3966 const struct drm_connector_state *old_conn_state) 3967 { 3968 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3969 3970 intel_dp->link_trained = false; 3971 3972 if (old_crtc_state->has_audio) 3973 intel_audio_codec_disable(encoder, 3974 old_crtc_state, old_conn_state); 3975 3976 /* Make sure the panel is off before trying to change the mode. But also 3977 * ensure that we have vdd while we switch off the panel. */ 3978 intel_edp_panel_vdd_on(intel_dp); 3979 intel_edp_backlight_off(old_conn_state); 3980 intel_dp_set_power(intel_dp, DP_SET_POWER_D3); 3981 intel_edp_panel_off(intel_dp); 3982 intel_dp->frl.is_trained = false; 3983 intel_dp->frl.trained_rate_gbps = 0; 3984 } 3985 3986 static void g4x_disable_dp(struct intel_atomic_state *state, 3987 struct intel_encoder *encoder, 3988 const struct intel_crtc_state *old_crtc_state, 3989 const struct drm_connector_state *old_conn_state) 3990 { 3991 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3992 } 3993 3994 static void vlv_disable_dp(struct intel_atomic_state *state, 3995 struct intel_encoder *encoder, 3996 const struct intel_crtc_state *old_crtc_state, 3997 const struct drm_connector_state *old_conn_state) 3998 { 3999 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 4000 } 4001 4002 static void g4x_post_disable_dp(struct intel_atomic_state *state, 4003 struct intel_encoder *encoder, 4004 const struct intel_crtc_state *old_crtc_state, 4005 const struct drm_connector_state *old_conn_state) 4006 { 4007 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4008 enum port port = encoder->port; 4009 4010 /* 4011 * Bspec does not list a specific disable sequence for g4x DP. 4012 * Follow the ilk+ sequence (disable pipe before the port) for 4013 * g4x DP as it does not suffer from underruns like the normal 4014 * g4x modeset sequence (disable pipe after the port). 
4015 */ 4016 intel_dp_link_down(encoder, old_crtc_state); 4017 4018 /* Only ilk+ has port A */ 4019 if (port == PORT_A) 4020 ilk_edp_pll_off(intel_dp, old_crtc_state); 4021 } 4022 4023 static void vlv_post_disable_dp(struct intel_atomic_state *state, 4024 struct intel_encoder *encoder, 4025 const struct intel_crtc_state *old_crtc_state, 4026 const struct drm_connector_state *old_conn_state) 4027 { 4028 intel_dp_link_down(encoder, old_crtc_state); 4029 } 4030 4031 static void chv_post_disable_dp(struct intel_atomic_state *state, 4032 struct intel_encoder *encoder, 4033 const struct intel_crtc_state *old_crtc_state, 4034 const struct drm_connector_state *old_conn_state) 4035 { 4036 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4037 4038 intel_dp_link_down(encoder, old_crtc_state); 4039 4040 vlv_dpio_get(dev_priv); 4041 4042 /* Assert data lane reset */ 4043 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 4044 4045 vlv_dpio_put(dev_priv); 4046 } 4047 4048 static void 4049 cpt_set_link_train(struct intel_dp *intel_dp, 4050 const struct intel_crtc_state *crtc_state, 4051 u8 dp_train_pat) 4052 { 4053 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4054 u32 *DP = &intel_dp->DP; 4055 4056 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 4057 4058 switch (intel_dp_training_pattern_symbol(dp_train_pat)) { 4059 case DP_TRAINING_PATTERN_DISABLE: 4060 *DP |= DP_LINK_TRAIN_OFF_CPT; 4061 break; 4062 case DP_TRAINING_PATTERN_1: 4063 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 4064 break; 4065 case DP_TRAINING_PATTERN_2: 4066 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 4067 break; 4068 case DP_TRAINING_PATTERN_3: 4069 drm_dbg_kms(&dev_priv->drm, 4070 "TPS3 not supported, using TPS2 instead\n"); 4071 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 4072 break; 4073 } 4074 4075 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4076 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4077 } 4078 4079 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) 4080 { 4081 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4082 4083 /* Clear the cached register set to avoid using stale values */ 4084 4085 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); 4086 4087 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, 4088 intel_dp->pcon_dsc_dpcd, 4089 sizeof(intel_dp->pcon_dsc_dpcd)) < 0) 4090 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", 4091 DP_PCON_DSC_ENCODER); 4092 4093 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", 4094 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); 4095 } 4096 4097 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) 4098 { 4099 int bw_gbps[] = {9, 18, 24, 32, 40, 48}; 4100 int i; 4101 4102 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { 4103 if (frl_bw_mask & (1 << i)) 4104 return bw_gbps[i]; 4105 } 4106 return 0; 4107 } 4108 4109 static int intel_dp_pcon_set_frl_mask(int max_frl) 4110 { 4111 switch (max_frl) { 4112 case 48: 4113 return DP_PCON_FRL_BW_MASK_48GBPS; 4114 case 40: 4115 return DP_PCON_FRL_BW_MASK_40GBPS; 4116 case 32: 4117 return DP_PCON_FRL_BW_MASK_32GBPS; 4118 case 24: 4119 return DP_PCON_FRL_BW_MASK_24GBPS; 4120 case 18: 4121 return DP_PCON_FRL_BW_MASK_18GBPS; 4122 case 9: 4123 return DP_PCON_FRL_BW_MASK_9GBPS; 4124 } 4125 4126 return 0; 4127 } 4128 4129 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 4130 { 4131 struct intel_connector *intel_connector = intel_dp->attached_connector; 4132 struct drm_connector *connector = &intel_connector->base; 4133 int max_frl_rate; 4134 int max_lanes, 
rate_per_lane; 4135 int max_dsc_lanes, dsc_rate_per_lane; 4136 4137 max_lanes = connector->display_info.hdmi.max_lanes; 4138 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; 4139 max_frl_rate = max_lanes * rate_per_lane; 4140 4141 if (connector->display_info.hdmi.dsc_cap.v_1p2) { 4142 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; 4143 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; 4144 if (max_dsc_lanes && dsc_rate_per_lane) 4145 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); 4146 } 4147 4148 return max_frl_rate; 4149 } 4150 4151 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) 4152 { 4153 #define PCON_EXTENDED_TRAIN_MODE true 4154 #define PCON_CONCURRENT_MODE true 4155 #define PCON_SEQUENTIAL_MODE (!PCON_CONCURRENT_MODE) 4156 #define PCON_NORMAL_TRAIN_MODE (!PCON_EXTENDED_TRAIN_MODE) 4157 #define TIMEOUT_FRL_READY_MS 500 4158 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 4159 4160 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4161 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; 4162 u8 max_frl_bw_mask = 0, frl_trained_mask; 4163 bool is_active; 4164 4165 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 4166 if (ret < 0) 4167 return ret; 4168 4169 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 4170 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); 4171 4172 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); 4173 drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw); 4174 4175 max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw); 4176 4177 if (max_frl_bw <= 0) 4178 return -EINVAL; 4179 4180 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); 4181 if (ret < 0) 4182 return ret; 4183 /* Wait for PCON to be FRL Ready */ 4184 wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux), TIMEOUT_FRL_READY_MS); 4185 4186 if (!is_active) 4187 return -ETIMEDOUT; 4188 4189 max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); 4190 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE); 4191 if (ret < 0) 4192 return ret; 4193 ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE); 4194 if (ret < 0) 4195 return ret; 4196 ret = drm_dp_pcon_frl_enable(&intel_dp->aux); 4197 if (ret < 0) 4198 return ret; 4199 /* 4200 * Wait for FRL training to be completed: 4201 * check that the HDMI link is up and active.
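 *
 * Worked example (illustrative values): with max_frl_bw = 24 Gbps,
 * max_frl_bw_mask above is DP_PCON_FRL_BW_MASK_24GBPS; if the PCON
 * then reports training at 18 Gbps, intel_dp_pcon_get_frl_mask()
 * below maps frl_trained_mask back to trained_rate_gbps = 18.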
4202 */ 4203 wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux), TIMEOUT_HDMI_LINK_ACTIVE_MS); 4204 4205 if (!is_active) 4206 return -ETIMEDOUT; 4207 4208 /* Verify HDMI Link configuration shows FRL Mode */ 4209 if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != 4210 DP_PCON_HDMI_MODE_FRL) { 4211 drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); 4212 return -EINVAL; 4213 } 4214 drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); 4215 4216 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); 4217 intel_dp->frl.is_trained = true; 4218 drm_dbg(&i915->drm, "FRL trained with %d Gbps\n", intel_dp->frl.trained_rate_gbps); 4219 4220 return 0; 4221 } 4222 4223 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) 4224 { 4225 if (drm_dp_is_branch(intel_dp->dpcd) && 4226 intel_dp->has_hdmi_sink && 4227 intel_dp_hdmi_sink_max_frl(intel_dp) > 0) 4228 return true; 4229 4230 return false; 4231 } 4232 4233 void intel_dp_check_frl_training(struct intel_dp *intel_dp) 4234 { 4235 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4236 4237 /* Always go for FRL training if supported */ 4238 if (!intel_dp_is_hdmi_2_1_sink(intel_dp) || 4239 intel_dp->frl.is_trained) 4240 return; 4241 4242 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { 4243 int ret, mode; 4244 4245 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); 4246 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 4247 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); 4248 4249 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) 4250 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n"); 4251 } else { 4252 drm_dbg(&dev_priv->drm, "FRL training completed\n"); 4253 } 4254 } 4255 4256 static int 4257 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) 4258 { 4259 int vactive = crtc_state->hw.adjusted_mode.vdisplay; 4260 4261 return intel_hdmi_dsc_get_slice_height(vactive); 4262 } 4263 4264 static int 4265 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, 4266 const struct intel_crtc_state *crtc_state) 4267 { 4268 struct intel_connector *intel_connector = intel_dp->attached_connector; 4269 struct drm_connector *connector = &intel_connector->base; 4270 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; 4271 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; 4272 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); 4273 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); 4274 4275 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, 4276 pcon_max_slice_width, 4277 hdmi_max_slices, hdmi_throughput); 4278 } 4279 4280 static int 4281 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, 4282 const struct intel_crtc_state *crtc_state, 4283 int num_slices, int slice_width) 4284 { 4285 struct intel_connector *intel_connector = intel_dp->attached_connector; 4286 struct drm_connector *connector = &intel_connector->base; 4287 int output_format = crtc_state->output_format; 4288 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; 4289 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); 4290 int hdmi_max_chunk_bytes = 4291 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; 4292 4293 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, 4294 num_slices, output_format,
hdmi_all_bpp, 4295 hdmi_max_chunk_bytes); 4296 } 4297 4298 void 4299 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, 4300 const struct intel_crtc_state *crtc_state) 4301 { 4302 u8 pps_param[6]; 4303 int slice_height; 4304 int slice_width; 4305 int num_slices; 4306 int bits_per_pixel; 4307 int ret; 4308 struct intel_connector *intel_connector = intel_dp->attached_connector; 4309 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4310 struct drm_connector *connector; 4311 bool hdmi_is_dsc_1_2; 4312 4313 if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) 4314 return; 4315 4316 if (!intel_connector) 4317 return; 4318 connector = &intel_connector->base; 4319 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; 4320 4321 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || 4322 !hdmi_is_dsc_1_2) 4323 return; 4324 4325 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); 4326 if (!slice_height) 4327 return; 4328 4329 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); 4330 if (!num_slices) 4331 return; 4332 4333 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, 4334 num_slices); 4335 4336 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, 4337 num_slices, slice_width); 4338 if (!bits_per_pixel) 4339 return; 4340 4341 pps_param[0] = slice_height & 0xFF; 4342 pps_param[1] = slice_height >> 8; 4343 pps_param[2] = slice_width & 0xFF; 4344 pps_param[3] = slice_width >> 8; 4345 pps_param[4] = bits_per_pixel & 0xFF; 4346 pps_param[5] = (bits_per_pixel >> 8) & 0x3; 4347 4348 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); 4349 if (ret < 0) 4350 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); 4351 } 4352 4353 static void 4354 g4x_set_link_train(struct intel_dp *intel_dp, 4355 const struct intel_crtc_state *crtc_state, 4356 u8 dp_train_pat) 4357 { 4358 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4359 u32 *DP = &intel_dp->DP; 4360 4361 *DP &= ~DP_LINK_TRAIN_MASK; 4362 4363 switch (intel_dp_training_pattern_symbol(dp_train_pat)) { 4364 case DP_TRAINING_PATTERN_DISABLE: 4365 *DP |= DP_LINK_TRAIN_OFF; 4366 break; 4367 case DP_TRAINING_PATTERN_1: 4368 *DP |= DP_LINK_TRAIN_PAT_1; 4369 break; 4370 case DP_TRAINING_PATTERN_2: 4371 *DP |= DP_LINK_TRAIN_PAT_2; 4372 break; 4373 case DP_TRAINING_PATTERN_3: 4374 drm_dbg_kms(&dev_priv->drm, 4375 "TPS3 not supported, using TPS2 instead\n"); 4376 *DP |= DP_LINK_TRAIN_PAT_2; 4377 break; 4378 } 4379 4380 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4381 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4382 } 4383 4384 static void intel_dp_enable_port(struct intel_dp *intel_dp, 4385 const struct intel_crtc_state *crtc_state) 4386 { 4387 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4388 4389 /* enable with pattern 1 (as per spec) */ 4390 4391 intel_dp_program_link_training_pattern(intel_dp, crtc_state, 4392 DP_TRAINING_PATTERN_1); 4393 4394 /* 4395 * Magic for VLV/CHV. We _must_ first set up the register 4396 * without actually enabling the port, and then do another 4397 * write to enable the port. Otherwise link training will 4398 * fail when the power sequencer is freshly used for this port. 
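 *
 * Sketch of the resulting write sequence (illustrative; the first
 * write is the training pattern write issued above):
 *
 *   intel_de_write(dev_priv, output_reg, DP);              <- no DP_PORT_EN
 *   intel_de_write(dev_priv, output_reg, DP | DP_PORT_EN); <- port enabled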
4399 */ 4400 intel_dp->DP |= DP_PORT_EN; 4401 if (crtc_state->has_audio) 4402 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 4403 4404 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4405 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4406 } 4407 4408 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 4409 const struct intel_crtc_state *crtc_state) 4410 { 4411 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4412 u8 tmp; 4413 4414 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) 4415 return; 4416 4417 if (!drm_dp_is_branch(intel_dp->dpcd)) 4418 return; 4419 4420 tmp = intel_dp->has_hdmi_sink ? 4421 DP_HDMI_DVI_OUTPUT_CONFIG : 0; 4422 4423 if (drm_dp_dpcd_writeb(&intel_dp->aux, 4424 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 4425 drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n", 4426 enableddisabled(intel_dp->has_hdmi_sink)); 4427 4428 tmp = intel_dp->dfp.ycbcr_444_to_420 ? 4429 DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 4430 4431 if (drm_dp_dpcd_writeb(&intel_dp->aux, 4432 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 4433 drm_dbg_kms(&i915->drm, 4434 "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n", 4435 enableddisabled(intel_dp->dfp.ycbcr_444_to_420)); 4436 4437 tmp = 0; 4438 if (intel_dp->dfp.rgb_to_ycbcr) { 4439 bool bt2020, bt709; 4440 4441 /* 4442 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only 4443 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default. 4444 * 4445 */ 4446 tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE; 4447 4448 bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 4449 intel_dp->downstream_ports, 4450 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); 4451 bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 4452 intel_dp->downstream_ports, 4453 DP_DS_HDMI_BT709_RGB_YCBCR_CONV); 4454 switch (crtc_state->infoframes.vsc.colorimetry) { 4455 case DP_COLORIMETRY_BT2020_RGB: 4456 case DP_COLORIMETRY_BT2020_YCC: 4457 if (bt2020) 4458 tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE; 4459 break; 4460 case DP_COLORIMETRY_BT709_YCC: 4461 case DP_COLORIMETRY_XVYCC_709: 4462 if (bt709) 4463 tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE; 4464 break; 4465 default: 4466 break; 4467 } 4468 } 4469 4470 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 4471 drm_dbg_kms(&i915->drm, 4472 "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n", 4473 enableddisabled(tmp ? 
true : false)); 4474 } 4475 4476 static void intel_enable_dp(struct intel_atomic_state *state, 4477 struct intel_encoder *encoder, 4478 const struct intel_crtc_state *pipe_config, 4479 const struct drm_connector_state *conn_state) 4480 { 4481 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4482 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4483 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 4484 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 4485 enum pipe pipe = crtc->pipe; 4486 intel_wakeref_t wakeref; 4487 4488 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 4489 return; 4490 4491 with_pps_lock(intel_dp, wakeref) { 4492 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4493 vlv_init_panel_power_sequencer(encoder, pipe_config); 4494 4495 intel_dp_enable_port(intel_dp, pipe_config); 4496 4497 edp_panel_vdd_on(intel_dp); 4498 edp_panel_on(intel_dp); 4499 edp_panel_vdd_off(intel_dp, true); 4500 } 4501 4502 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4503 unsigned int lane_mask = 0x0; 4504 4505 if (IS_CHERRYVIEW(dev_priv)) 4506 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 4507 4508 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 4509 lane_mask); 4510 } 4511 4512 intel_dp_set_power(intel_dp, DP_SET_POWER_D0); 4513 intel_dp_configure_protocol_converter(intel_dp, pipe_config); 4514 intel_dp_check_frl_training(intel_dp); 4515 intel_dp_pcon_dsc_configure(intel_dp, pipe_config); 4516 intel_dp_start_link_train(intel_dp, pipe_config); 4517 intel_dp_stop_link_train(intel_dp, pipe_config); 4518 4519 if (pipe_config->has_audio) { 4520 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 4521 pipe_name(pipe)); 4522 intel_audio_codec_enable(encoder, pipe_config, conn_state); 4523 } 4524 } 4525 4526 static void g4x_enable_dp(struct intel_atomic_state *state, 4527 struct intel_encoder *encoder, 4528 const struct intel_crtc_state *pipe_config, 4529 const struct drm_connector_state *conn_state) 4530 { 4531 intel_enable_dp(state, encoder, pipe_config, conn_state); 4532 intel_edp_backlight_on(pipe_config, conn_state); 4533 } 4534 4535 static void vlv_enable_dp(struct intel_atomic_state *state, 4536 struct intel_encoder *encoder, 4537 const struct intel_crtc_state *pipe_config, 4538 const struct drm_connector_state *conn_state) 4539 { 4540 intel_edp_backlight_on(pipe_config, conn_state); 4541 } 4542 4543 static void g4x_pre_enable_dp(struct intel_atomic_state *state, 4544 struct intel_encoder *encoder, 4545 const struct intel_crtc_state *pipe_config, 4546 const struct drm_connector_state *conn_state) 4547 { 4548 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4549 enum port port = encoder->port; 4550 4551 intel_dp_prepare(encoder, pipe_config); 4552 4553 /* Only ilk+ has port A */ 4554 if (port == PORT_A) 4555 ilk_edp_pll_on(intel_dp, pipe_config); 4556 } 4557 4558 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 4559 { 4560 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4561 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 4562 enum pipe pipe = intel_dp->pps_pipe; 4563 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 4564 4565 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 4566 4567 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 4568 return; 4569 4570 edp_panel_vdd_off_sync(intel_dp); 4571 4572 /* 4573 * VLV seems to get confused when multiple power sequencers 4574 * have the same port selected (even if 
only one has power/vdd 4575 * enabled). The failure manifests as vlv_wait_port_ready() failing. 4576 * CHV, on the other hand, doesn't seem to mind having the same port 4577 * selected in multiple power sequencers, but let's always clear the 4578 * port select when logically disconnecting a power sequencer 4579 * from a port. 4580 */ 4581 drm_dbg_kms(&dev_priv->drm, 4582 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", 4583 pipe_name(pipe), dig_port->base.base.base.id, 4584 dig_port->base.base.name); 4585 intel_de_write(dev_priv, pp_on_reg, 0); 4586 intel_de_posting_read(dev_priv, pp_on_reg); 4587 4588 intel_dp->pps_pipe = INVALID_PIPE; 4589 } 4590 4591 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 4592 enum pipe pipe) 4593 { 4594 struct intel_encoder *encoder; 4595 4596 lockdep_assert_held(&dev_priv->pps_mutex); 4597 4598 for_each_intel_dp(&dev_priv->drm, encoder) { 4599 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4600 4601 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe, 4602 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", 4603 pipe_name(pipe), encoder->base.base.id, 4604 encoder->base.name); 4605 4606 if (intel_dp->pps_pipe != pipe) 4607 continue; 4608 4609 drm_dbg_kms(&dev_priv->drm, 4610 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", 4611 pipe_name(pipe), encoder->base.base.id, 4612 encoder->base.name); 4613 4614 /* make sure vdd is off before we steal it */ 4615 vlv_detach_power_sequencer(intel_dp); 4616 } 4617 } 4618 4619 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 4620 const struct intel_crtc_state *crtc_state) 4621 { 4622 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4623 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4624 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4625 4626 lockdep_assert_held(&dev_priv->pps_mutex); 4627 4628 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 4629 4630 if (intel_dp->pps_pipe != INVALID_PIPE && 4631 intel_dp->pps_pipe != crtc->pipe) { 4632 /* 4633 * If another power sequencer was being used on this 4634 * port previously, make sure to turn off vdd there while 4635 * we still have control of it. 4636 */ 4637 vlv_detach_power_sequencer(intel_dp); 4638 } 4639 4640 /* 4641 * We may be stealing the power 4642 * sequencer from another port.
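 *
 * Example scenario (illustrative): eDP was last driven through pipe B,
 * so pipe B's power sequencer still has that other port selected.
 * Bringing this encoder up on pipe B must steal that sequencer first,
 * detaching it (with VDD off) from the old port before claiming it
 * below.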
4643 */ 4644 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 4645 4646 intel_dp->active_pipe = crtc->pipe; 4647 4648 if (!intel_dp_is_edp(intel_dp)) 4649 return; 4650 4651 /* now it's all ours */ 4652 intel_dp->pps_pipe = crtc->pipe; 4653 4654 drm_dbg_kms(&dev_priv->drm, 4655 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 4656 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 4657 encoder->base.name); 4658 4659 /* init power sequencer on this pipe and port */ 4660 intel_dp_init_panel_power_sequencer(intel_dp); 4661 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 4662 } 4663 4664 static void vlv_pre_enable_dp(struct intel_atomic_state *state, 4665 struct intel_encoder *encoder, 4666 const struct intel_crtc_state *pipe_config, 4667 const struct drm_connector_state *conn_state) 4668 { 4669 vlv_phy_pre_encoder_enable(encoder, pipe_config); 4670 4671 intel_enable_dp(state, encoder, pipe_config, conn_state); 4672 } 4673 4674 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, 4675 struct intel_encoder *encoder, 4676 const struct intel_crtc_state *pipe_config, 4677 const struct drm_connector_state *conn_state) 4678 { 4679 intel_dp_prepare(encoder, pipe_config); 4680 4681 vlv_phy_pre_pll_enable(encoder, pipe_config); 4682 } 4683 4684 static void chv_pre_enable_dp(struct intel_atomic_state *state, 4685 struct intel_encoder *encoder, 4686 const struct intel_crtc_state *pipe_config, 4687 const struct drm_connector_state *conn_state) 4688 { 4689 chv_phy_pre_encoder_enable(encoder, pipe_config); 4690 4691 intel_enable_dp(state, encoder, pipe_config, conn_state); 4692 4693 /* Second common lane will stay alive on its own now */ 4694 chv_phy_release_cl2_override(encoder); 4695 } 4696 4697 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, 4698 struct intel_encoder *encoder, 4699 const struct intel_crtc_state *pipe_config, 4700 const struct drm_connector_state *conn_state) 4701 { 4702 intel_dp_prepare(encoder, pipe_config); 4703 4704 chv_phy_pre_pll_enable(encoder, pipe_config); 4705 } 4706 4707 static void chv_dp_post_pll_disable(struct intel_atomic_state *state, 4708 struct intel_encoder *encoder, 4709 const struct intel_crtc_state *old_crtc_state, 4710 const struct drm_connector_state *old_conn_state) 4711 { 4712 chv_phy_post_pll_disable(encoder, old_crtc_state); 4713 } 4714 4715 static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp, 4716 const struct intel_crtc_state *crtc_state) 4717 { 4718 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4719 } 4720 4721 static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp, 4722 const struct intel_crtc_state *crtc_state) 4723 { 4724 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 4725 } 4726 4727 static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp) 4728 { 4729 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4730 } 4731 4732 static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp) 4733 { 4734 return DP_TRAIN_PRE_EMPH_LEVEL_3; 4735 } 4736 4737 static void vlv_set_signal_levels(struct intel_dp *intel_dp, 4738 const struct intel_crtc_state *crtc_state) 4739 { 4740 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4741 unsigned long demph_reg_value, preemph_reg_value, 4742 uniqtranscale_reg_value; 4743 u8 train_set = intel_dp->train_set[0]; 4744 4745 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4746 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4747 preemph_reg_value = 0x0004000; 4748 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4749 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4750 demph_reg_value = 0x2B405555; 
4751 uniqtranscale_reg_value = 0x552AB83A; 4752 break; 4753 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4754 demph_reg_value = 0x2B404040; 4755 uniqtranscale_reg_value = 0x5548B83A; 4756 break; 4757 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4758 demph_reg_value = 0x2B245555; 4759 uniqtranscale_reg_value = 0x5560B83A; 4760 break; 4761 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4762 demph_reg_value = 0x2B405555; 4763 uniqtranscale_reg_value = 0x5598DA3A; 4764 break; 4765 default: 4766 return; 4767 } 4768 break; 4769 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4770 preemph_reg_value = 0x0002000; 4771 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4772 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4773 demph_reg_value = 0x2B404040; 4774 uniqtranscale_reg_value = 0x5552B83A; 4775 break; 4776 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4777 demph_reg_value = 0x2B404848; 4778 uniqtranscale_reg_value = 0x5580B83A; 4779 break; 4780 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4781 demph_reg_value = 0x2B404040; 4782 uniqtranscale_reg_value = 0x55ADDA3A; 4783 break; 4784 default: 4785 return; 4786 } 4787 break; 4788 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4789 preemph_reg_value = 0x0000000; 4790 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4791 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4792 demph_reg_value = 0x2B305555; 4793 uniqtranscale_reg_value = 0x5570B83A; 4794 break; 4795 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4796 demph_reg_value = 0x2B2B4040; 4797 uniqtranscale_reg_value = 0x55ADDA3A; 4798 break; 4799 default: 4800 return; 4801 } 4802 break; 4803 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4804 preemph_reg_value = 0x0006000; 4805 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4806 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4807 demph_reg_value = 0x1B405555; 4808 uniqtranscale_reg_value = 0x55ADDA3A; 4809 break; 4810 default: 4811 return; 4812 } 4813 break; 4814 default: 4815 return; 4816 } 4817 4818 vlv_set_phy_signal_level(encoder, crtc_state, 4819 demph_reg_value, preemph_reg_value, 4820 uniqtranscale_reg_value, 0); 4821 } 4822 4823 static void chv_set_signal_levels(struct intel_dp *intel_dp, 4824 const struct intel_crtc_state *crtc_state) 4825 { 4826 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4827 u32 deemph_reg_value, margin_reg_value; 4828 bool uniq_trans_scale = false; 4829 u8 train_set = intel_dp->train_set[0]; 4830 4831 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4832 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4833 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4834 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4835 deemph_reg_value = 128; 4836 margin_reg_value = 52; 4837 break; 4838 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4839 deemph_reg_value = 128; 4840 margin_reg_value = 77; 4841 break; 4842 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4843 deemph_reg_value = 128; 4844 margin_reg_value = 102; 4845 break; 4846 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4847 deemph_reg_value = 128; 4848 margin_reg_value = 154; 4849 uniq_trans_scale = true; 4850 break; 4851 default: 4852 return; 4853 } 4854 break; 4855 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4856 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4857 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4858 deemph_reg_value = 85; 4859 margin_reg_value = 78; 4860 break; 4861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4862 deemph_reg_value = 85; 4863 margin_reg_value = 116; 4864 break; 4865 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4866 deemph_reg_value = 85; 4867 margin_reg_value = 154; 4868 break; 4869 default: 4870 return; 4871 } 4872 break; 4873 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4874 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4875 
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4876 deemph_reg_value = 64; 4877 margin_reg_value = 104; 4878 break; 4879 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4880 deemph_reg_value = 64; 4881 margin_reg_value = 154; 4882 break; 4883 default: 4884 return; 4885 } 4886 break; 4887 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4888 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4889 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4890 deemph_reg_value = 43; 4891 margin_reg_value = 154; 4892 break; 4893 default: 4894 return; 4895 } 4896 break; 4897 default: 4898 return; 4899 } 4900 4901 chv_set_phy_signal_level(encoder, crtc_state, 4902 deemph_reg_value, margin_reg_value, 4903 uniq_trans_scale); 4904 } 4905 4906 static u32 g4x_signal_levels(u8 train_set) 4907 { 4908 u32 signal_levels = 0; 4909 4910 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4911 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4912 default: 4913 signal_levels |= DP_VOLTAGE_0_4; 4914 break; 4915 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4916 signal_levels |= DP_VOLTAGE_0_6; 4917 break; 4918 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4919 signal_levels |= DP_VOLTAGE_0_8; 4920 break; 4921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4922 signal_levels |= DP_VOLTAGE_1_2; 4923 break; 4924 } 4925 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4926 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4927 default: 4928 signal_levels |= DP_PRE_EMPHASIS_0; 4929 break; 4930 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4931 signal_levels |= DP_PRE_EMPHASIS_3_5; 4932 break; 4933 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4934 signal_levels |= DP_PRE_EMPHASIS_6; 4935 break; 4936 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4937 signal_levels |= DP_PRE_EMPHASIS_9_5; 4938 break; 4939 } 4940 return signal_levels; 4941 } 4942 4943 static void 4944 g4x_set_signal_levels(struct intel_dp *intel_dp, 4945 const struct intel_crtc_state *crtc_state) 4946 { 4947 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4948 u8 train_set = intel_dp->train_set[0]; 4949 u32 signal_levels; 4950 4951 signal_levels = g4x_signal_levels(train_set); 4952 4953 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4954 signal_levels); 4955 4956 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); 4957 intel_dp->DP |= signal_levels; 4958 4959 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4960 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4961 } 4962 4963 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4964 static u32 snb_cpu_edp_signal_levels(u8 train_set) 4965 { 4966 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4967 DP_TRAIN_PRE_EMPHASIS_MASK); 4968 4969 switch (signal_levels) { 4970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4971 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4972 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4973 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4974 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4975 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4976 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4977 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4978 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4980 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4982 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4983 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4984 default: 4985 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis 
level:" 4986 "0x%x\n", signal_levels); 4987 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4988 } 4989 } 4990 4991 static void 4992 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, 4993 const struct intel_crtc_state *crtc_state) 4994 { 4995 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4996 u8 train_set = intel_dp->train_set[0]; 4997 u32 signal_levels; 4998 4999 signal_levels = snb_cpu_edp_signal_levels(train_set); 5000 5001 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 5002 signal_levels); 5003 5004 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 5005 intel_dp->DP |= signal_levels; 5006 5007 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 5008 intel_de_posting_read(dev_priv, intel_dp->output_reg); 5009 } 5010 5011 /* IVB CPU eDP voltage swing and pre-emphasis control */ 5012 static u32 ivb_cpu_edp_signal_levels(u8 train_set) 5013 { 5014 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 5015 DP_TRAIN_PRE_EMPHASIS_MASK); 5016 5017 switch (signal_levels) { 5018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 5019 return EDP_LINK_TRAIN_400MV_0DB_IVB; 5020 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 5021 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 5022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 5023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 5024 return EDP_LINK_TRAIN_400MV_6DB_IVB; 5025 5026 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 5027 return EDP_LINK_TRAIN_600MV_0DB_IVB; 5028 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 5029 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 5030 5031 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 5032 return EDP_LINK_TRAIN_800MV_0DB_IVB; 5033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 5034 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 5035 5036 default: 5037 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 5038 "0x%x\n", signal_levels); 5039 return EDP_LINK_TRAIN_500MV_0DB_IVB; 5040 } 5041 } 5042 5043 static void 5044 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, 5045 const struct intel_crtc_state *crtc_state) 5046 { 5047 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5048 u8 train_set = intel_dp->train_set[0]; 5049 u32 signal_levels; 5050 5051 signal_levels = ivb_cpu_edp_signal_levels(train_set); 5052 5053 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 5054 signal_levels); 5055 5056 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 5057 intel_dp->DP |= signal_levels; 5058 5059 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 5060 intel_de_posting_read(dev_priv, intel_dp->output_reg); 5061 } 5062 5063 void intel_dp_set_signal_levels(struct intel_dp *intel_dp, 5064 const struct intel_crtc_state *crtc_state) 5065 { 5066 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5067 u8 train_set = intel_dp->train_set[0]; 5068 5069 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 5070 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 5071 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 5072 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 5073 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 5074 DP_TRAIN_PRE_EMPHASIS_SHIFT, 5075 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
5076 " (max)" : ""); 5077 5078 intel_dp->set_signal_levels(intel_dp, crtc_state); 5079 } 5080 5081 void 5082 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 5083 const struct intel_crtc_state *crtc_state, 5084 u8 dp_train_pat) 5085 { 5086 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5087 5088 if ((intel_dp_training_pattern_symbol(dp_train_pat)) != 5089 DP_TRAINING_PATTERN_DISABLE) 5090 drm_dbg_kms(&dev_priv->drm, 5091 "Using DP training pattern TPS%d\n", 5092 intel_dp_training_pattern_symbol(dp_train_pat)); 5093 5094 intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); 5095 } 5096 5097 static void 5098 intel_dp_link_down(struct intel_encoder *encoder, 5099 const struct intel_crtc_state *old_crtc_state) 5100 { 5101 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5102 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5103 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 5104 enum port port = encoder->port; 5105 u32 DP = intel_dp->DP; 5106 5107 if (drm_WARN_ON(&dev_priv->drm, 5108 (intel_de_read(dev_priv, intel_dp->output_reg) & 5109 DP_PORT_EN) == 0)) 5110 return; 5111 5112 drm_dbg_kms(&dev_priv->drm, "\n"); 5113 5114 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 5115 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 5116 DP &= ~DP_LINK_TRAIN_MASK_CPT; 5117 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 5118 } else { 5119 DP &= ~DP_LINK_TRAIN_MASK; 5120 DP |= DP_LINK_TRAIN_PAT_IDLE; 5121 } 5122 intel_de_write(dev_priv, intel_dp->output_reg, DP); 5123 intel_de_posting_read(dev_priv, intel_dp->output_reg); 5124 5125 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 5126 intel_de_write(dev_priv, intel_dp->output_reg, DP); 5127 intel_de_posting_read(dev_priv, intel_dp->output_reg); 5128 5129 /* 5130 * HW workaround for IBX, we need to move the port 5131 * to transcoder A after disabling it to allow the 5132 * matching HDMI port to be enabled on transcoder A. 5133 */ 5134 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 5135 /* 5136 * We get CPU/PCH FIFO underruns on the other pipe when 5137 * doing the workaround. Sweep them under the rug. 
5138 */ 5139 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 5140 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 5141 5142 /* always enable with pattern 1 (as per spec) */ 5143 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 5144 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 5145 DP_LINK_TRAIN_PAT_1; 5146 intel_de_write(dev_priv, intel_dp->output_reg, DP); 5147 intel_de_posting_read(dev_priv, intel_dp->output_reg); 5148 5149 DP &= ~DP_PORT_EN; 5150 intel_de_write(dev_priv, intel_dp->output_reg, DP); 5151 intel_de_posting_read(dev_priv, intel_dp->output_reg); 5152 5153 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 5154 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 5155 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 5156 } 5157 5158 msleep(intel_dp->panel_power_down_delay); 5159 5160 intel_dp->DP = DP; 5161 5162 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 5163 intel_wakeref_t wakeref; 5164 5165 with_pps_lock(intel_dp, wakeref) 5166 intel_dp->active_pipe = INVALID_PIPE; 5167 } 5168 } 5169 5170 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 5171 { 5172 u8 dprx = 0; 5173 5174 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 5175 &dprx) != 1) 5176 return false; 5177 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 5178 } 5179 5180 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 5181 { 5182 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5183 5184 /* 5185 * Clear the cached register set to avoid using stale values 5186 * for the sinks that do not support DSC. 5187 */ 5188 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 5189 5190 /* Clear fec_capable to avoid using stale values */ 5191 intel_dp->fec_capable = 0; 5192 5193 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 5194 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 5195 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 5196 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 5197 intel_dp->dsc_dpcd, 5198 sizeof(intel_dp->dsc_dpcd)) < 0) 5199 drm_err(&i915->drm, 5200 "Failed to read DPCD register 0x%x\n", 5201 DP_DSC_SUPPORT); 5202 5203 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 5204 (int)sizeof(intel_dp->dsc_dpcd), 5205 intel_dp->dsc_dpcd); 5206 5207 /* FEC is supported only on DP 1.4 */ 5208 if (!intel_dp_is_edp(intel_dp) && 5209 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 5210 &intel_dp->fec_capable) < 0) 5211 drm_err(&i915->drm, 5212 "Failed to read FEC DPCD register\n"); 5213 5214 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 5215 intel_dp->fec_capable); 5216 } 5217 } 5218 5219 static bool 5220 intel_edp_init_dpcd(struct intel_dp *intel_dp) 5221 { 5222 struct drm_i915_private *dev_priv = 5223 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 5224 5225 /* this function is meant to be called only once */ 5226 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 5227 5228 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 5229 return false; 5230 5231 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 5232 drm_dp_is_branch(intel_dp->dpcd)); 5233 5234 /* 5235 * Read the eDP display control registers. 5236 * 5237 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 5238 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 5239 * set, but require eDP 1.4+ detection (e.g. for supported link rates 5240 * method). The display control registers should read zero if they're 5241 * not supported anyway. 
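 *
 * Worked example for the link rate conversion further below
 * (illustrative value): a DP_SUPPORTED_LINK_RATES entry of 13500
 * means 13500 * 200 kHz = 2.7 GHz per lane, and (val * 200) / 10
 * stores it as 270000, the same LS_Clk kHz unit the source rates
 * use (162000, 270000, ...).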
5242 */ 5243 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 5244 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 5245 sizeof(intel_dp->edp_dpcd)) 5246 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 5247 (int)sizeof(intel_dp->edp_dpcd), 5248 intel_dp->edp_dpcd); 5249 5250 /* 5251 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 5252 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 5253 */ 5254 intel_psr_init_dpcd(intel_dp); 5255 5256 /* Read the eDP 1.4+ supported link rates. */ 5257 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 5258 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 5259 int i; 5260 5261 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 5262 sink_rates, sizeof(sink_rates)); 5263 5264 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 5265 int val = le16_to_cpu(sink_rates[i]); 5266 5267 if (val == 0) 5268 break; 5269 5270 /* Value read multiplied by 200kHz gives the per-lane 5271 * link rate in kHz. The source rates are, however, 5272 * stored in terms of LS_Clk kHz. The full conversion 5273 * back to symbols is 5274 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 5275 */ 5276 intel_dp->sink_rates[i] = (val * 200) / 10; 5277 } 5278 intel_dp->num_sink_rates = i; 5279 } 5280 5281 /* 5282 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 5283 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 5284 */ 5285 if (intel_dp->num_sink_rates) 5286 intel_dp->use_rate_select = true; 5287 else 5288 intel_dp_set_sink_rates(intel_dp); 5289 5290 intel_dp_set_common_rates(intel_dp); 5291 5292 /* Read the eDP DSC DPCD registers */ 5293 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 5294 intel_dp_get_dsc_sink_cap(intel_dp); 5295 5296 /* 5297 * If needed, program our source OUI so we can make various Intel-specific AUX services 5298 * available (such as HDR backlight controls) 5299 */ 5300 intel_edp_init_source_oui(intel_dp, true); 5301 5302 return true; 5303 } 5304 5305 static bool 5306 intel_dp_has_sink_count(struct intel_dp *intel_dp) 5307 { 5308 if (!intel_dp->attached_connector) 5309 return false; 5310 5311 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 5312 intel_dp->dpcd, 5313 &intel_dp->desc); 5314 } 5315 5316 static bool 5317 intel_dp_get_dpcd(struct intel_dp *intel_dp) 5318 { 5319 int ret; 5320 5321 intel_dp_lttpr_init(intel_dp); 5322 5323 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) 5324 return false; 5325 5326 /* 5327 * Don't clobber cached eDP rates. Also skip re-reading 5328 * the OUI/ID since we know it won't change. 5329 */ 5330 if (!intel_dp_is_edp(intel_dp)) { 5331 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 5332 drm_dp_is_branch(intel_dp->dpcd)); 5333 5334 intel_dp_set_sink_rates(intel_dp); 5335 intel_dp_set_common_rates(intel_dp); 5336 } 5337 5338 if (intel_dp_has_sink_count(intel_dp)) { 5339 ret = drm_dp_read_sink_count(&intel_dp->aux); 5340 if (ret < 0) 5341 return false; 5342 5343 /* 5344 * Sink count can change between short pulse hpd hence 5345 * a member variable in intel_dp will track any changes 5346 * between short pulse interrupts. 5347 */ 5348 intel_dp->sink_count = ret; 5349 5350 /* 5351 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 5352 * a dongle is present but no display. Unless we require to know 5353 * if a dongle is present or not, we don't need to update 5354 * downstream port information. So, an early return here saves 5355 * time from performing other operations which are not required. 
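 *
 * Example (illustrative): a DP-to-HDMI dongle with no HDMI cable
 * plugged in reports DOWNSTREAM_PORT_PRESENT == 1 with SINK_COUNT == 0,
 * so the early return below reports no sink without reading the
 * downstream port info.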
5356 */ 5357 if (!intel_dp->sink_count) 5358 return false; 5359 } 5360 5361 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 5362 intel_dp->downstream_ports) == 0; 5363 } 5364 5365 static bool 5366 intel_dp_can_mst(struct intel_dp *intel_dp) 5367 { 5368 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5369 5370 return i915->params.enable_dp_mst && 5371 intel_dp->can_mst && 5372 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 5373 } 5374 5375 static void 5376 intel_dp_configure_mst(struct intel_dp *intel_dp) 5377 { 5378 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5379 struct intel_encoder *encoder = 5380 &dp_to_dig_port(intel_dp)->base; 5381 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 5382 5383 drm_dbg_kms(&i915->drm, 5384 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 5385 encoder->base.base.id, encoder->base.name, 5386 yesno(intel_dp->can_mst), yesno(sink_can_mst), 5387 yesno(i915->params.enable_dp_mst)); 5388 5389 if (!intel_dp->can_mst) 5390 return; 5391 5392 intel_dp->is_mst = sink_can_mst && 5393 i915->params.enable_dp_mst; 5394 5395 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5396 intel_dp->is_mst); 5397 } 5398 5399 static bool 5400 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 5401 { 5402 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 5403 sink_irq_vector, DP_DPRX_ESI_LEN) == 5404 DP_DPRX_ESI_LEN; 5405 } 5406 5407 bool 5408 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 5409 const struct drm_connector_state *conn_state) 5410 { 5411 /* 5412 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 5413 * of Color Encoding Format and Content Color Gamut], in order to 5414 * send YCbCr 4:2:0 or HDR BT.2020 signals we should use a DP VSC SDP. 5415 */ 5416 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 5417 return true; 5418 5419 switch (conn_state->colorspace) { 5420 case DRM_MODE_COLORIMETRY_SYCC_601: 5421 case DRM_MODE_COLORIMETRY_OPYCC_601: 5422 case DRM_MODE_COLORIMETRY_BT2020_YCC: 5423 case DRM_MODE_COLORIMETRY_BT2020_RGB: 5424 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 5425 return true; 5426 default: 5427 break; 5428 } 5429 5430 return false; 5431 } 5432 5433 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 5434 struct dp_sdp *sdp, size_t size) 5435 { 5436 size_t length = sizeof(struct dp_sdp); 5437 5438 if (size < length) 5439 return -ENOSPC; 5440 5441 memset(sdp, 0, size); 5442 5443 /* 5444 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 5445 * VSC SDP Header Bytes 5446 */ 5447 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 5448 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 5449 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 5450 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 5451 5452 /* 5453 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 5454 * per DP 1.4a spec.
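* A worked example of the packing below (the values follow directly from
* this code; the chosen stream is only illustrative): YCbCr 4:2:0
* (pixelformat 0x3) with colorimetry 0x0 gives DB16 = 0x30, and 10 bpc
* with CTA dynamic range gives DB17 = 0x2 | 0x80 = 0x82.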
5455 */ 5456 if (vsc->revision != 0x5) 5457 goto out; 5458 5459 /* VSC SDP Payload for DB16 through DB18 */ 5460 /* Pixel Encoding and Colorimetry Formats */ 5461 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 5462 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 5463 5464 switch (vsc->bpc) { 5465 case 6: 5466 /* 6bpc: 0x0 */ 5467 break; 5468 case 8: 5469 sdp->db[17] = 0x1; /* DB17[3:0] */ 5470 break; 5471 case 10: 5472 sdp->db[17] = 0x2; 5473 break; 5474 case 12: 5475 sdp->db[17] = 0x3; 5476 break; 5477 case 16: 5478 sdp->db[17] = 0x4; 5479 break; 5480 default: 5481 MISSING_CASE(vsc->bpc); 5482 break; 5483 } 5484 /* Dynamic Range and Component Bit Depth */ 5485 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 5486 sdp->db[17] |= 0x80; /* DB17[7] */ 5487 5488 /* Content Type */ 5489 sdp->db[18] = vsc->content_type & 0x7; 5490 5491 out: 5492 return length; 5493 } 5494 5495 static ssize_t 5496 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 5497 struct dp_sdp *sdp, 5498 size_t size) 5499 { 5500 size_t length = sizeof(struct dp_sdp); 5501 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 5502 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 5503 ssize_t len; 5504 5505 if (size < length) 5506 return -ENOSPC; 5507 5508 memset(sdp, 0, size); 5509 5510 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 5511 if (len < 0) { 5512 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 5513 return -ENOSPC; 5514 } 5515 5516 if (len != infoframe_size) { 5517 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 5518 return -ENOSPC; 5519 } 5520 5521 /* 5522 * Set up the infoframe sdp packet for HDR static metadata. 5523 * Prepare VSC Header for SU as per DP 1.4a spec, 5524 * Table 2-100 and Table 2-101 5525 */ 5526 5527 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 5528 sdp->sdp_header.HB0 = 0; 5529 /* 5530 * Packet Type 80h + Non-audio INFOFRAME Type value 5531 * HDMI_INFOFRAME_TYPE_DRM: 0x87 5532 * - 80h + Non-audio INFOFRAME Type value 5533 * - InfoFrame Type: 0x07 5534 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 5535 */ 5536 sdp->sdp_header.HB1 = drm_infoframe->type; 5537 /* 5538 * Least Significant Eight Bits of (Data Byte Count – 1) 5539 * infoframe_size - 1 5540 */ 5541 sdp->sdp_header.HB2 = 0x1D; 5542 /* INFOFRAME SDP Version Number */ 5543 sdp->sdp_header.HB3 = (0x13 << 2); 5544 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 5545 sdp->db[0] = drm_infoframe->version; 5546 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 5547 sdp->db[1] = drm_infoframe->length; 5548 /* 5549 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 5550 * HDMI_INFOFRAME_HEADER_SIZE 5551 */ 5552 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 5553 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 5554 HDMI_DRM_INFOFRAME_SIZE); 5555 5556 /* 5557 * Size of DP infoframe sdp packet for HDR static metadata consists of 5558 * - DP SDP Header(struct dp_sdp_header): 4 bytes 5559 * - Two Data Blocks: 2 bytes 5560 * CTA Header Byte2 (INFOFRAME Version Number) 5561 * CTA Header Byte3 (Length of INFOFRAME) 5562 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 5563 * 5564 * Prior to GEN11, the GMP register size is identical to the DP HDR 5565 * static metadata infoframe size. GEN11+ has a larger register; 5566 * write_infoframe will pad the rest of it.
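* Worked out: 4 (struct dp_sdp_header) + 2 (CTA header bytes) + 26
* (HDMI_DRM_INFOFRAME_SIZE) = 32 bytes returned below, while HB2 above
* is (HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE) - 1 =
* 30 - 1 = 0x1D.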
5567 */ 5568 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 5569 } 5570 5571 static void intel_write_dp_sdp(struct intel_encoder *encoder, 5572 const struct intel_crtc_state *crtc_state, 5573 unsigned int type) 5574 { 5575 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5576 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5577 struct dp_sdp sdp = {}; 5578 ssize_t len; 5579 5580 if ((crtc_state->infoframes.enable & 5581 intel_hdmi_infoframe_enable(type)) == 0) 5582 return; 5583 5584 switch (type) { 5585 case DP_SDP_VSC: 5586 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 5587 sizeof(sdp)); 5588 break; 5589 case HDMI_PACKET_TYPE_GAMUT_METADATA: 5590 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 5591 &sdp, sizeof(sdp)); 5592 break; 5593 default: 5594 MISSING_CASE(type); 5595 return; 5596 } 5597 5598 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 5599 return; 5600 5601 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 5602 } 5603 5604 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 5605 const struct intel_crtc_state *crtc_state, 5606 struct drm_dp_vsc_sdp *vsc) 5607 { 5608 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5609 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5610 struct dp_sdp sdp = {}; 5611 ssize_t len; 5612 5613 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 5614 5615 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 5616 return; 5617 5618 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 5619 &sdp, len); 5620 } 5621 5622 void intel_dp_set_infoframes(struct intel_encoder *encoder, 5623 bool enable, 5624 const struct intel_crtc_state *crtc_state, 5625 const struct drm_connector_state *conn_state) 5626 { 5627 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5628 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5629 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 5630 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 5631 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 5632 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 5633 u32 val = intel_de_read(dev_priv, reg); 5634 5635 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 5636 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 5637 if (intel_psr_enabled(intel_dp)) 5638 val &= ~dip_enable; 5639 else 5640 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 5641 5642 if (!enable) { 5643 intel_de_write(dev_priv, reg, val); 5644 intel_de_posting_read(dev_priv, reg); 5645 return; 5646 } 5647 5648 intel_de_write(dev_priv, reg, val); 5649 intel_de_posting_read(dev_priv, reg); 5650 5651 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 5652 if (!intel_psr_enabled(intel_dp)) 5653 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 5654 5655 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 5656 } 5657 5658 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 5659 const void *buffer, size_t size) 5660 { 5661 const struct dp_sdp *sdp = buffer; 5662 5663 if (size < sizeof(struct dp_sdp)) 5664 return -EINVAL; 5665 5666 memset(vsc, 0, sizeof(*vsc)); /* clear only the output struct; 'size' is the input buffer size */ 5667 5668 if (sdp->sdp_header.HB0 != 0) 5669 return -EINVAL; 5670 5671 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 5672 return -EINVAL; 5673 5674 vsc->sdp_type = sdp->sdp_header.HB1; 5675 vsc->revision = sdp->sdp_header.HB2; 5676 vsc->length = sdp->sdp_header.HB3; 5677 5678 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
5679 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 5680 /* 5681 * - HB2 = 0x2, HB3 = 0x8 5682 * VSC SDP supporting 3D stereo + PSR 5683 * - HB2 = 0x4, HB3 = 0xe 5684 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 5685 * first scan line of the SU region (applies to eDP v1.4b 5686 * and higher). 5687 */ 5688 return 0; 5689 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 5690 /* 5691 * - HB2 = 0x5, HB3 = 0x13 5692 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 5693 * Format. 5694 */ 5695 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 5696 vsc->colorimetry = sdp->db[16] & 0xf; 5697 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 5698 5699 switch (sdp->db[17] & 0x7) { 5700 case 0x0: 5701 vsc->bpc = 6; 5702 break; 5703 case 0x1: 5704 vsc->bpc = 8; 5705 break; 5706 case 0x2: 5707 vsc->bpc = 10; 5708 break; 5709 case 0x3: 5710 vsc->bpc = 12; 5711 break; 5712 case 0x4: 5713 vsc->bpc = 16; 5714 break; 5715 default: 5716 MISSING_CASE(sdp->db[17] & 0x7); 5717 return -EINVAL; 5718 } 5719 5720 vsc->content_type = sdp->db[18] & 0x7; 5721 } else { 5722 return -EINVAL; 5723 } 5724 5725 return 0; 5726 } 5727 5728 static int 5729 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 5730 const void *buffer, size_t size) 5731 { 5732 int ret; 5733 5734 const struct dp_sdp *sdp = buffer; 5735 5736 if (size < sizeof(struct dp_sdp)) 5737 return -EINVAL; 5738 5739 if (sdp->sdp_header.HB0 != 0) 5740 return -EINVAL; 5741 5742 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 5743 return -EINVAL; 5744 5745 /* 5746 * Least Significant Eight Bits of (Data Byte Count – 1) 5747 * 1Dh (i.e., Data Byte Count = 30 bytes). 5748 */ 5749 if (sdp->sdp_header.HB2 != 0x1D) 5750 return -EINVAL; 5751 5752 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
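* A well-formed header thus carries HB3 = 0x13 << 2 = 0x4c: the version
* number 0x13 in bits 7:2 and the two byte-count MSBs cleared in bits 1:0.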
*/ 5753 if ((sdp->sdp_header.HB3 & 0x3) != 0) 5754 return -EINVAL; 5755 5756 /* INFOFRAME SDP Version Number */ 5757 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 5758 return -EINVAL; 5759 5760 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 5761 if (sdp->db[0] != 1) 5762 return -EINVAL; 5763 5764 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 5765 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 5766 return -EINVAL; 5767 5768 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 5769 HDMI_DRM_INFOFRAME_SIZE); 5770 5771 return ret; 5772 } 5773 5774 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 5775 struct intel_crtc_state *crtc_state, 5776 struct drm_dp_vsc_sdp *vsc) 5777 { 5778 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5779 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5780 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5781 unsigned int type = DP_SDP_VSC; 5782 struct dp_sdp sdp = {}; 5783 int ret; 5784 5785 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 5786 if (intel_psr_enabled(intel_dp)) 5787 return; 5788 5789 if ((crtc_state->infoframes.enable & 5790 intel_hdmi_infoframe_enable(type)) == 0) 5791 return; 5792 5793 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 5794 5795 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 5796 5797 if (ret) 5798 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 5799 } 5800 5801 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 5802 struct intel_crtc_state *crtc_state, 5803 struct hdmi_drm_infoframe *drm_infoframe) 5804 { 5805 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5806 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5807 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 5808 struct dp_sdp sdp = {}; 5809 int ret; 5810 5811 if ((crtc_state->infoframes.enable & 5812 intel_hdmi_infoframe_enable(type)) == 0) 5813 return; 5814 5815 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 5816 sizeof(sdp)); 5817 5818 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 5819 sizeof(sdp)); 5820 5821 if (ret) 5822 drm_dbg_kms(&dev_priv->drm, 5823 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 5824 } 5825 5826 void intel_read_dp_sdp(struct intel_encoder *encoder, 5827 struct intel_crtc_state *crtc_state, 5828 unsigned int type) 5829 { 5830 if (encoder->type != INTEL_OUTPUT_DDI) 5831 return; 5832 5833 switch (type) { 5834 case DP_SDP_VSC: 5835 intel_read_dp_vsc_sdp(encoder, crtc_state, 5836 &crtc_state->infoframes.vsc); 5837 break; 5838 case HDMI_PACKET_TYPE_GAMUT_METADATA: 5839 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 5840 &crtc_state->infoframes.drm.drm); 5841 break; 5842 default: 5843 MISSING_CASE(type); 5844 break; 5845 } 5846 } 5847 5848 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 5849 { 5850 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5851 int status = 0; 5852 int test_link_rate; 5853 u8 test_lane_count, test_link_bw; 5854 /* (DP CTS 1.2) 5855 * 4.3.1.11 5856 */ 5857 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */ 5858 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 5859 &test_lane_count); 5860 5861 if (status <= 0) { 5862 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 5863 return DP_TEST_NAK; 5864 } 5865 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 5866 5867 status = drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_LINK_RATE, 5868 &test_link_bw); 5869 if (status <= 0) { 5870 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5871 return DP_TEST_NAK; 5872 } 5873 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5874 5875 /* Validate the requested link rate and lane count */ 5876 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5877 test_lane_count)) 5878 return DP_TEST_NAK; 5879 5880 intel_dp->compliance.test_lane_count = test_lane_count; 5881 intel_dp->compliance.test_link_rate = test_link_rate; 5882 5883 return DP_TEST_ACK; 5884 } 5885 5886 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5887 { 5888 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5889 u8 test_pattern; 5890 u8 test_misc; 5891 __be16 h_width, v_height; 5892 int status = 0; 5893 5894 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5895 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5896 &test_pattern); 5897 if (status <= 0) { 5898 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5899 return DP_TEST_NAK; 5900 } 5901 if (test_pattern != DP_COLOR_RAMP) 5902 return DP_TEST_NAK; 5903 5904 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5905 &h_width, 2); 5906 if (status <= 0) { 5907 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5908 return DP_TEST_NAK; 5909 } 5910 5911 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5912 &v_height, 2); 5913 if (status <= 0) { 5914 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5915 return DP_TEST_NAK; 5916 } 5917 5918 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5919 &test_misc); 5920 if (status <= 0) { 5921 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5922 return DP_TEST_NAK; 5923 } 5924 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5925 return DP_TEST_NAK; 5926 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5927 return DP_TEST_NAK; 5928 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5929 case DP_TEST_BIT_DEPTH_6: 5930 intel_dp->compliance.test_data.bpc = 6; 5931 break; 5932 case DP_TEST_BIT_DEPTH_8: 5933 intel_dp->compliance.test_data.bpc = 8; 5934 break; 5935 default: 5936 return DP_TEST_NAK; 5937 } 5938 5939 intel_dp->compliance.test_data.video_pattern = test_pattern; 5940 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5941 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5942 /* Set test active flag here so userspace doesn't interrupt things */ 5943 intel_dp->compliance.test_active = true; 5944 5945 return DP_TEST_ACK; 5946 } 5947 5948 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5949 { 5950 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5951 u8 test_result = DP_TEST_ACK; 5952 struct intel_connector *intel_connector = intel_dp->attached_connector; 5953 struct drm_connector *connector = &intel_connector->base; 5954 5955 if (intel_connector->detect_edid == NULL || 5956 connector->edid_corrupt || 5957 intel_dp->aux.i2c_defer_count > 6) { 5958 /* Check EDID read for NACKs, DEFERs and corruption 5959 * (DP CTS 1.2 Core r1.1) 5960 * 4.2.2.4 : Failed EDID read, I2C_NAK 5961 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5962 * 4.2.2.6 : EDID corruption detected 5963 * Use failsafe mode for all cases 5964 */ 5965 if (intel_dp->aux.i2c_nack_count > 0 || 5966 intel_dp->aux.i2c_defer_count > 0) 5967 drm_dbg_kms(&i915->drm, 5968 "EDID read had %d NACKs, %d DEFERs\n", 5969 intel_dp->aux.i2c_nack_count, 5970 intel_dp->aux.i2c_defer_count); 5971 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 5972 } else { 
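/*
* struct edid is exactly one 128-byte EDID block, so the pointer
* arithmetic below steps over 'extensions' blocks and lands on the
* last block read; its final byte is the checksum the test expects
* at DP_TEST_EDID_CHECKSUM.
*/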
5973 struct edid *block = intel_connector->detect_edid; 5974 5975 /* We have to write the checksum 5976 * of the last block read 5977 */ 5978 block += intel_connector->detect_edid->extensions; 5979 5980 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 5981 block->checksum) <= 0) 5982 drm_dbg_kms(&i915->drm, 5983 "Failed to write EDID checksum\n"); 5984 5985 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 5986 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 5987 } 5988 5989 /* Set test active flag here so userspace doesn't interrupt things */ 5990 intel_dp->compliance.test_active = true; 5991 5992 return test_result; 5993 } 5994 5995 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 5996 const struct intel_crtc_state *crtc_state) 5997 { 5998 struct drm_i915_private *dev_priv = 5999 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 6000 struct drm_dp_phy_test_params *data = 6001 &intel_dp->compliance.test_data.phytest; 6002 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 6003 enum pipe pipe = crtc->pipe; 6004 u32 pattern_val; 6005 6006 switch (data->phy_pattern) { 6007 case DP_PHY_TEST_PATTERN_NONE: 6008 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 6009 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 6010 break; 6011 case DP_PHY_TEST_PATTERN_D10_2: 6012 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 6013 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 6014 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 6015 break; 6016 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 6017 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 6018 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 6019 DDI_DP_COMP_CTL_ENABLE | 6020 DDI_DP_COMP_CTL_SCRAMBLED_0); 6021 break; 6022 case DP_PHY_TEST_PATTERN_PRBS7: 6023 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 6024 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 6025 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 6026 break; 6027 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 6028 /* 6029 * FIXME: Ideally the pattern should come from DPCD 0x250. As the 6030 * current firmware of the DPR-100 cannot set it, hardcode it 6031 * for the compliance test for now. 6032 */ 6033 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 6034 pattern_val = 0x3e0f83e0; 6035 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 6036 pattern_val = 0x0f83e0f8; 6037 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 6038 pattern_val = 0x0000f83e; 6039 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 6040 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 6041 DDI_DP_COMP_CTL_ENABLE | 6042 DDI_DP_COMP_CTL_CUSTOM80); 6043 break; 6044 case DP_PHY_TEST_PATTERN_CP2520: 6045 /* 6046 * FIXME: Ideally the pattern should come from DPCD 0x24A. As the 6047 * current firmware of the DPR-100 cannot set it, hardcode it 6048 * for the compliance test for now.
6049 */ 6050 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 6051 pattern_val = 0xFB; 6052 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 6053 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 6054 pattern_val); 6055 break; 6056 default: 6057 WARN(1, "Invalid Phy Test Pattern\n"); 6058 } 6059 } 6060 6061 static void 6062 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, 6063 const struct intel_crtc_state *crtc_state) 6064 { 6065 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6066 struct drm_device *dev = dig_port->base.base.dev; 6067 struct drm_i915_private *dev_priv = to_i915(dev); 6068 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 6069 enum pipe pipe = crtc->pipe; 6070 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 6071 6072 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 6073 TRANS_DDI_FUNC_CTL(pipe)); 6074 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 6075 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 6076 6077 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 6078 TGL_TRANS_DDI_PORT_MASK); 6079 trans_conf_value &= ~PIPECONF_ENABLE; 6080 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 6081 6082 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 6083 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 6084 trans_ddi_func_ctl_value); 6085 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 6086 } 6087 6088 static void 6089 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, 6090 const struct intel_crtc_state *crtc_state) 6091 { 6092 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6093 struct drm_device *dev = dig_port->base.base.dev; 6094 struct drm_i915_private *dev_priv = to_i915(dev); 6095 enum port port = dig_port->base.port; 6096 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 6097 enum pipe pipe = crtc->pipe; 6098 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 6099 6100 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 6101 TRANS_DDI_FUNC_CTL(pipe)); 6102 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 6103 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 6104 6105 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 6106 TGL_TRANS_DDI_SELECT_PORT(port); 6107 trans_conf_value |= PIPECONF_ENABLE; 6108 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 6109 6110 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 6111 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 6112 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 6113 trans_ddi_func_ctl_value); 6114 } 6115 6116 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 6117 const struct intel_crtc_state *crtc_state) 6118 { 6119 struct drm_dp_phy_test_params *data = 6120 &intel_dp->compliance.test_data.phytest; 6121 u8 link_status[DP_LINK_STATUS_SIZE]; 6122 6123 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 6124 link_status) < 0) { 6125 DRM_DEBUG_KMS("failed to get link status\n"); 6126 return; 6127 } 6128 6129 /* retrieve vswing & pre-emphasis setting */ 6130 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 6131 link_status); 6132 6133 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); 6134 6135 intel_dp_set_signal_levels(intel_dp, crtc_state); 6136 6137 intel_dp_phy_pattern_update(intel_dp, crtc_state); 6138 6139 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); 6140 6141 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 6142 link_status[DP_DPCD_REV]); 
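/*
* Summing up the sequence above (a reading of this code, not of the
* compliance spec): tear down the transcoder, program the source
* vswing/pre-emphasis and the pattern generator, bring the pipe back
* up, and only then tell the sink via DPCD which pattern to expect.
*/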
6143 } 6144 6145 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 6146 { 6147 struct drm_dp_phy_test_params *data = 6148 &intel_dp->compliance.test_data.phytest; 6149 6150 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 6151 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 6152 return DP_TEST_NAK; 6153 } 6154 6155 /* Set test active flag here so userspace doesn't interrupt things */ 6156 intel_dp->compliance.test_active = true; 6157 6158 return DP_TEST_ACK; 6159 } 6160 6161 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 6162 { 6163 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6164 u8 response = DP_TEST_NAK; 6165 u8 request = 0; 6166 int status; 6167 6168 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 6169 if (status <= 0) { 6170 drm_dbg_kms(&i915->drm, 6171 "Could not read test request from sink\n"); 6172 goto update_status; 6173 } 6174 6175 switch (request) { 6176 case DP_TEST_LINK_TRAINING: 6177 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 6178 response = intel_dp_autotest_link_training(intel_dp); 6179 break; 6180 case DP_TEST_LINK_VIDEO_PATTERN: 6181 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 6182 response = intel_dp_autotest_video_pattern(intel_dp); 6183 break; 6184 case DP_TEST_LINK_EDID_READ: 6185 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 6186 response = intel_dp_autotest_edid(intel_dp); 6187 break; 6188 case DP_TEST_LINK_PHY_TEST_PATTERN: 6189 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 6190 response = intel_dp_autotest_phy_pattern(intel_dp); 6191 break; 6192 default: 6193 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 6194 request); 6195 break; 6196 } 6197 6198 if (response & DP_TEST_ACK) 6199 intel_dp->compliance.test_type = request; 6200 6201 update_status: 6202 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 6203 if (status <= 0) 6204 drm_dbg_kms(&i915->drm, 6205 "Could not write test response to sink\n"); 6206 } 6207 6208 /** 6209 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 6210 * @intel_dp: Intel DP struct 6211 * 6212 * Read any pending MST interrupts, call MST core to handle these and ack the 6213 * interrupts. Check if the main and AUX link state is ok. 6214 * 6215 * Returns: 6216 * - %true if pending interrupts were serviced (or no interrupts were 6217 * pending) w/o detecting an error condition. 6218 * - %false if an error condition - like AUX failure or a loss of link - is 6219 * detected, which needs servicing from the hotplug work. 
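*
* Layout note: the ESI read below starts at DP_SINK_COUNT_ESI (DPCD
* 0x2002) and spans DP_DPRX_ESI_LEN (14) bytes up to 0x200f, which is
* why the lane status checked by drm_dp_channel_eq_ok() sits at
* esi[10] (0x200c - 0x2002).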
6220 */ 6221 static bool 6222 intel_dp_check_mst_status(struct intel_dp *intel_dp) 6223 { 6224 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6225 bool link_ok = true; 6226 6227 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 6228 6229 for (;;) { 6230 u8 esi[DP_DPRX_ESI_LEN] = {}; 6231 bool handled; 6232 int retry; 6233 6234 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 6235 drm_dbg_kms(&i915->drm, 6236 "failed to get ESI - device may have failed\n"); 6237 link_ok = false; 6238 6239 break; 6240 } 6241 6242 /* check link status - esi[10] = 0x200c */ 6243 if (intel_dp->active_mst_links > 0 && link_ok && 6244 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 6245 drm_dbg_kms(&i915->drm, 6246 "channel EQ not ok, retraining\n"); 6247 link_ok = false; 6248 } 6249 6250 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 6251 6252 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 6253 if (!handled) 6254 break; 6255 6256 for (retry = 0; retry < 3; retry++) { 6257 int wret; 6258 6259 wret = drm_dp_dpcd_write(&intel_dp->aux, 6260 DP_SINK_COUNT_ESI+1, 6261 &esi[1], 3); 6262 if (wret == 3) 6263 break; 6264 } 6265 } 6266 6267 return link_ok; 6268 } 6269 6270 static void 6271 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) 6272 { 6273 bool is_active; 6274 u8 buf = 0; 6275 6276 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); 6277 if (intel_dp->frl.is_trained && !is_active) { 6278 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0) 6279 return; 6280 6281 buf &= ~DP_PCON_ENABLE_HDMI_LINK; 6282 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0) 6283 return; 6284 6285 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); 6286 6287 /* Restart FRL training or fall back to TMDS mode */ 6288 intel_dp_check_frl_training(intel_dp); 6289 } 6290 } 6291 6292 static bool 6293 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 6294 { 6295 u8 link_status[DP_LINK_STATUS_SIZE]; 6296 6297 if (!intel_dp->link_trained) 6298 return false; 6299 6300 /* 6301 * While PSR source HW is enabled, it controls main-link frame 6302 * transmission, enabling and disabling it on its own, so a retrain 6303 * attempted now could fail because the link may not be on, or it 6304 * could mix training patterns and frame data at the same time. 6305 * Also, when exiting PSR, the HW retrains the link anyway, fixing 6306 * any link status error. 6307 */ 6308 if (intel_psr_enabled(intel_dp)) 6309 return false; 6310 6311 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 6312 link_status) < 0) 6313 return false; 6314 6315 /* 6316 * Validate the cached values of intel_dp->link_rate and 6317 * intel_dp->lane_count before attempting to retrain. 6318 * 6319 * FIXME would be nice to use the crtc state here, but since 6320 * we need to call this from the short HPD handler that seems 6321 * a bit hard.
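*
* For reference, drm_dp_channel_eq_ok() below checks the CR_DONE,
* CHANNEL_EQ_DONE and SYMBOL_LOCKED bits of every active lane, plus
* interlane alignment, from the DPCD lane status bytes.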
6322 */ 6323 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 6324 intel_dp->lane_count)) 6325 return false; 6326 6327 /* Retrain if Channel EQ or CR not ok */ 6328 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 6329 } 6330 6331 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 6332 const struct drm_connector_state *conn_state) 6333 { 6334 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6335 struct intel_encoder *encoder; 6336 enum pipe pipe; 6337 6338 if (!conn_state->best_encoder) 6339 return false; 6340 6341 /* SST */ 6342 encoder = &dp_to_dig_port(intel_dp)->base; 6343 if (conn_state->best_encoder == &encoder->base) 6344 return true; 6345 6346 /* MST */ 6347 for_each_pipe(i915, pipe) { 6348 encoder = &intel_dp->mst_encoders[pipe]->base; 6349 if (conn_state->best_encoder == &encoder->base) 6350 return true; 6351 } 6352 6353 return false; 6354 } 6355 6356 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 6357 struct drm_modeset_acquire_ctx *ctx, 6358 u32 *crtc_mask) 6359 { 6360 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6361 struct drm_connector_list_iter conn_iter; 6362 struct intel_connector *connector; 6363 int ret = 0; 6364 6365 *crtc_mask = 0; 6366 6367 if (!intel_dp_needs_link_retrain(intel_dp)) 6368 return 0; 6369 6370 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 6371 for_each_intel_connector_iter(connector, &conn_iter) { 6372 struct drm_connector_state *conn_state = 6373 connector->base.state; 6374 struct intel_crtc_state *crtc_state; 6375 struct intel_crtc *crtc; 6376 6377 if (!intel_dp_has_connector(intel_dp, conn_state)) 6378 continue; 6379 6380 crtc = to_intel_crtc(conn_state->crtc); 6381 if (!crtc) 6382 continue; 6383 6384 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 6385 if (ret) 6386 break; 6387 6388 crtc_state = to_intel_crtc_state(crtc->base.state); 6389 6390 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 6391 6392 if (!crtc_state->hw.active) 6393 continue; 6394 6395 if (conn_state->commit && 6396 !try_wait_for_completion(&conn_state->commit->hw_done)) 6397 continue; 6398 6399 *crtc_mask |= drm_crtc_mask(&crtc->base); 6400 } 6401 drm_connector_list_iter_end(&conn_iter); 6402 6403 if (!intel_dp_needs_link_retrain(intel_dp)) 6404 *crtc_mask = 0; 6405 6406 return ret; 6407 } 6408 6409 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 6410 { 6411 struct intel_connector *connector = intel_dp->attached_connector; 6412 6413 return connector->base.status == connector_status_connected || 6414 intel_dp->is_mst; 6415 } 6416 6417 int intel_dp_retrain_link(struct intel_encoder *encoder, 6418 struct drm_modeset_acquire_ctx *ctx) 6419 { 6420 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6421 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 6422 struct intel_crtc *crtc; 6423 u32 crtc_mask; 6424 int ret; 6425 6426 if (!intel_dp_is_connected(intel_dp)) 6427 return 0; 6428 6429 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 6430 ctx); 6431 if (ret) 6432 return ret; 6433 6434 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 6435 if (ret) 6436 return ret; 6437 6438 if (crtc_mask == 0) 6439 return 0; 6440 6441 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 6442 encoder->base.base.id, encoder->base.name); 6443 6444 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 6445 const struct intel_crtc_state *crtc_state = 6446 to_intel_crtc_state(crtc->base.state); 6447 6448 /* Suppress underruns caused 
by re-training */ 6449 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 6450 if (crtc_state->has_pch_encoder) 6451 intel_set_pch_fifo_underrun_reporting(dev_priv, 6452 intel_crtc_pch_transcoder(crtc), false); 6453 } 6454 6455 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 6456 const struct intel_crtc_state *crtc_state = 6457 to_intel_crtc_state(crtc->base.state); 6458 6459 /* retrain on the MST master transcoder */ 6460 if (INTEL_GEN(dev_priv) >= 12 && 6461 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 6462 !intel_dp_mst_is_master_trans(crtc_state)) 6463 continue; 6464 6465 intel_dp_check_frl_training(intel_dp); 6466 intel_dp_pcon_dsc_configure(intel_dp, crtc_state); 6467 intel_dp_start_link_train(intel_dp, crtc_state); 6468 intel_dp_stop_link_train(intel_dp, crtc_state); 6469 break; 6470 } 6471 6472 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 6473 const struct intel_crtc_state *crtc_state = 6474 to_intel_crtc_state(crtc->base.state); 6475 6476 /* Keep underrun reporting disabled until things are stable */ 6477 intel_wait_for_vblank(dev_priv, crtc->pipe); 6478 6479 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 6480 if (crtc_state->has_pch_encoder) 6481 intel_set_pch_fifo_underrun_reporting(dev_priv, 6482 intel_crtc_pch_transcoder(crtc), true); 6483 } 6484 6485 return 0; 6486 } 6487 6488 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, 6489 struct drm_modeset_acquire_ctx *ctx, 6490 u32 *crtc_mask) 6491 { 6492 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6493 struct drm_connector_list_iter conn_iter; 6494 struct intel_connector *connector; 6495 int ret = 0; 6496 6497 *crtc_mask = 0; 6498 6499 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 6500 for_each_intel_connector_iter(connector, &conn_iter) { 6501 struct drm_connector_state *conn_state = 6502 connector->base.state; 6503 struct intel_crtc_state *crtc_state; 6504 struct intel_crtc *crtc; 6505 6506 if (!intel_dp_has_connector(intel_dp, conn_state)) 6507 continue; 6508 6509 crtc = to_intel_crtc(conn_state->crtc); 6510 if (!crtc) 6511 continue; 6512 6513 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 6514 if (ret) 6515 break; 6516 6517 crtc_state = to_intel_crtc_state(crtc->base.state); 6518 6519 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 6520 6521 if (!crtc_state->hw.active) 6522 continue; 6523 6524 if (conn_state->commit && 6525 !try_wait_for_completion(&conn_state->commit->hw_done)) 6526 continue; 6527 6528 *crtc_mask |= drm_crtc_mask(&crtc->base); 6529 } 6530 drm_connector_list_iter_end(&conn_iter); 6531 6532 return ret; 6533 } 6534 6535 static int intel_dp_do_phy_test(struct intel_encoder *encoder, 6536 struct drm_modeset_acquire_ctx *ctx) 6537 { 6538 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6539 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 6540 struct intel_crtc *crtc; 6541 u32 crtc_mask; 6542 int ret; 6543 6544 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 6545 ctx); 6546 if (ret) 6547 return ret; 6548 6549 ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask); 6550 if (ret) 6551 return ret; 6552 6553 if (crtc_mask == 0) 6554 return 0; 6555 6556 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", 6557 encoder->base.base.id, encoder->base.name); 6558 6559 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 6560 const struct intel_crtc_state *crtc_state = 6561 to_intel_crtc_state(crtc->base.state); 6562 6563 /* test on the MST master 
transcoder */ 6564 if (INTEL_GEN(dev_priv) >= 12 && 6565 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 6566 !intel_dp_mst_is_master_trans(crtc_state)) 6567 continue; 6568 6569 intel_dp_process_phy_request(intel_dp, crtc_state); 6570 break; 6571 } 6572 6573 return 0; 6574 } 6575 6576 static void intel_dp_phy_test(struct intel_encoder *encoder) 6577 { 6578 struct drm_modeset_acquire_ctx ctx; 6579 int ret; 6580 6581 drm_modeset_acquire_init(&ctx, 0); 6582 6583 for (;;) { 6584 ret = intel_dp_do_phy_test(encoder, &ctx); 6585 6586 if (ret == -EDEADLK) { 6587 drm_modeset_backoff(&ctx); 6588 continue; 6589 } 6590 6591 break; 6592 } 6593 6594 drm_modeset_drop_locks(&ctx); 6595 drm_modeset_acquire_fini(&ctx); 6596 drm_WARN(encoder->base.dev, ret, 6597 "Acquiring modeset locks failed with %i\n", ret); 6598 } 6599 6600 /* 6601 * If the display is now connected, check the link status; 6602 * there have been known issues of link loss triggering a 6603 * long pulse. 6604 * 6605 * Some sinks (e.g. ASUS PB287Q) seem to perform some 6606 * weird HPD ping pong during modesets. So we can apparently 6607 * end up with HPD going low during a modeset, and then 6608 * going back up soon after. Once that happens we must 6609 * retrain the link to get a picture, in case no userspace 6610 * component reacted to the intermittent HPD dip. 6611 */ 6612 static enum intel_hotplug_state 6613 intel_dp_hotplug(struct intel_encoder *encoder, 6614 struct intel_connector *connector) 6615 { 6616 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 6617 struct drm_modeset_acquire_ctx ctx; 6618 enum intel_hotplug_state state; 6619 int ret; 6620 6621 if (intel_dp->compliance.test_active && 6622 intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { 6623 intel_dp_phy_test(encoder); 6624 /* just do the PHY test and nothing else */ 6625 return INTEL_HOTPLUG_UNCHANGED; 6626 } 6627 6628 state = intel_encoder_hotplug(encoder, connector); 6629 6630 drm_modeset_acquire_init(&ctx, 0); 6631 6632 for (;;) { 6633 ret = intel_dp_retrain_link(encoder, &ctx); 6634 6635 if (ret == -EDEADLK) { 6636 drm_modeset_backoff(&ctx); 6637 continue; 6638 } 6639 6640 break; 6641 } 6642 6643 drm_modeset_drop_locks(&ctx); 6644 drm_modeset_acquire_fini(&ctx); 6645 drm_WARN(encoder->base.dev, ret, 6646 "Acquiring modeset locks failed with %i\n", ret); 6647 6648 /* 6649 * Keeping it consistent with intel_ddi_hotplug() and 6650 * intel_hdmi_hotplug().
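* In practice: if the first pass saw no change and we have not
* retried yet, ask for one INTEL_HOTPLUG_RETRY pass below, so sinks
* that droop HPD mid-modeset get re-checked.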
6651 */ 6652 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) 6653 state = INTEL_HOTPLUG_RETRY; 6654 6655 return state; 6656 } 6657 6658 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) 6659 { 6660 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6661 u8 val; 6662 6663 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 6664 return; 6665 6666 if (drm_dp_dpcd_readb(&intel_dp->aux, 6667 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 6668 return; 6669 6670 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 6671 6672 if (val & DP_AUTOMATED_TEST_REQUEST) 6673 intel_dp_handle_test_request(intel_dp); 6674 6675 if (val & DP_CP_IRQ) 6676 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 6677 6678 if (val & DP_SINK_SPECIFIC_IRQ) 6679 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 6680 } 6681 6682 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp) 6683 { 6684 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6685 u8 val; 6686 6687 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 6688 return; 6689 6690 if (drm_dp_dpcd_readb(&intel_dp->aux, 6691 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) { 6692 drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n"); 6693 return; 6694 } 6695 6696 if (drm_dp_dpcd_writeb(&intel_dp->aux, 6697 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) { 6698 drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n"); 6699 return; 6700 } 6701 6702 if (val & HDMI_LINK_STATUS_CHANGED) 6703 intel_dp_handle_hdmi_link_status_change(intel_dp); 6704 } 6705 6706 /* 6707 * According to DP spec 6708 * 5.1.2: 6709 * 1. Read DPCD 6710 * 2. Configure link according to Receiver Capabilities 6711 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 6712 * 4. Check link status on receipt of hot-plug interrupt 6713 * 6714 * intel_dp_short_pulse - handles short pulse interrupts 6715 * when full detection is not required. 6716 * Returns %true if short pulse is handled and full detection 6717 * is NOT required and %false otherwise. 6718 */ 6719 static bool 6720 intel_dp_short_pulse(struct intel_dp *intel_dp) 6721 { 6722 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6723 u8 old_sink_count = intel_dp->sink_count; 6724 bool ret; 6725 6726 /* 6727 * Clearing compliance test variables to allow capturing 6728 * of values for next automated test request. 
6729 */ 6730 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6731 6732 /* 6733 * Now read the DPCD to see if it's actually running. 6734 * If the current value of sink count doesn't match 6735 * the value that was stored earlier, or the DPCD read failed, 6736 * we need to do full detection. 6737 */ 6738 ret = intel_dp_get_dpcd(intel_dp); 6739 6740 if ((old_sink_count != intel_dp->sink_count) || !ret) { 6741 /* No need to proceed if we are going to do full detect */ 6742 return false; 6743 } 6744 6745 intel_dp_check_device_service_irq(intel_dp); 6746 intel_dp_check_link_service_irq(intel_dp); 6747 6748 /* Handle CEC interrupts, if any */ 6749 drm_dp_cec_irq(&intel_dp->aux); 6750 6751 /* defer to the hotplug work for link retraining if needed */ 6752 if (intel_dp_needs_link_retrain(intel_dp)) 6753 return false; 6754 6755 intel_psr_short_pulse(intel_dp); 6756 6757 switch (intel_dp->compliance.test_type) { 6758 case DP_TEST_LINK_TRAINING: 6759 drm_dbg_kms(&dev_priv->drm, 6760 "Link Training Compliance Test requested\n"); 6761 /* Send a Hotplug Uevent to userspace to start modeset */ 6762 drm_kms_helper_hotplug_event(&dev_priv->drm); 6763 break; 6764 case DP_TEST_LINK_PHY_TEST_PATTERN: 6765 drm_dbg_kms(&dev_priv->drm, 6766 "PHY test pattern Compliance Test requested\n"); 6767 /* 6768 * Schedule long hpd to do the test 6769 * 6770 * FIXME get rid of the ad-hoc phy test modeset code 6771 * and properly incorporate it into the normal modeset. 6772 */ 6773 return false; 6774 } 6775 6776 return true; 6777 } 6778 6779 /* XXX this is probably wrong for multiple downstream ports */ 6780 static enum drm_connector_status 6781 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 6782 { 6783 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6784 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6785 u8 *dpcd = intel_dp->dpcd; 6786 u8 type; 6787 6788 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) 6789 return connector_status_connected; 6790 6791 lspcon_resume(dig_port); 6792 6793 if (!intel_dp_get_dpcd(intel_dp)) 6794 return connector_status_disconnected; 6795 6796 /* if there's no downstream port, we're done */ 6797 if (!drm_dp_is_branch(dpcd)) 6798 return connector_status_connected; 6799 6800 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 6801 if (intel_dp_has_sink_count(intel_dp) && 6802 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 6803 return intel_dp->sink_count ?
6804 connector_status_connected : connector_status_disconnected; 6805 } 6806 6807 if (intel_dp_can_mst(intel_dp)) 6808 return connector_status_connected; 6809 6810 /* If no HPD, poke DDC gently */ 6811 if (drm_probe_ddc(&intel_dp->aux.ddc)) 6812 return connector_status_connected; 6813 6814 /* Well we tried, say unknown for unreliable port types */ 6815 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 6816 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 6817 if (type == DP_DS_PORT_TYPE_VGA || 6818 type == DP_DS_PORT_TYPE_NON_EDID) 6819 return connector_status_unknown; 6820 } else { 6821 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 6822 DP_DWN_STRM_PORT_TYPE_MASK; 6823 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 6824 type == DP_DWN_STRM_PORT_TYPE_OTHER) 6825 return connector_status_unknown; 6826 } 6827 6828 /* Anything else is out of spec, warn and ignore */ 6829 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 6830 return connector_status_disconnected; 6831 } 6832 6833 static enum drm_connector_status 6834 edp_detect(struct intel_dp *intel_dp) 6835 { 6836 return connector_status_connected; 6837 } 6838 6839 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 6840 { 6841 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6842 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; 6843 6844 return intel_de_read(dev_priv, SDEISR) & bit; 6845 } 6846 6847 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 6848 { 6849 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6850 u32 bit; 6851 6852 switch (encoder->hpd_pin) { 6853 case HPD_PORT_B: 6854 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 6855 break; 6856 case HPD_PORT_C: 6857 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 6858 break; 6859 case HPD_PORT_D: 6860 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 6861 break; 6862 default: 6863 MISSING_CASE(encoder->hpd_pin); 6864 return false; 6865 } 6866 6867 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6868 } 6869 6870 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 6871 { 6872 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6873 u32 bit; 6874 6875 switch (encoder->hpd_pin) { 6876 case HPD_PORT_B: 6877 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 6878 break; 6879 case HPD_PORT_C: 6880 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 6881 break; 6882 case HPD_PORT_D: 6883 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 6884 break; 6885 default: 6886 MISSING_CASE(encoder->hpd_pin); 6887 return false; 6888 } 6889 6890 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6891 } 6892 6893 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 6894 { 6895 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6896 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; 6897 6898 return intel_de_read(dev_priv, DEISR) & bit; 6899 } 6900 6901 /* 6902 * intel_digital_port_connected - is the specified port connected? 6903 * @encoder: intel_encoder 6904 * 6905 * In cases where there's a connector physically connected but it can't be used 6906 * by our hardware we also return false, since the rest of the driver should 6907 * pretty much treat the port as disconnected. This is relevant for type-C 6908 * (starting on ICL) where there's ownership involved. 6909 * 6910 * Return %true if port is connected, %false otherwise. 
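*
* Note that the live-status query itself needs the DISPLAY_CORE power
* domain; the wakeref is taken and released around the ->connected()
* hook below.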
6911 */ 6912 bool intel_digital_port_connected(struct intel_encoder *encoder) 6913 { 6914 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6915 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6916 bool is_connected = false; 6917 intel_wakeref_t wakeref; 6918 6919 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6920 is_connected = dig_port->connected(encoder); 6921 6922 return is_connected; 6923 } 6924 6925 static struct edid * 6926 intel_dp_get_edid(struct intel_dp *intel_dp) 6927 { 6928 struct intel_connector *intel_connector = intel_dp->attached_connector; 6929 6930 /* use cached edid if we have one */ 6931 if (intel_connector->edid) { 6932 /* invalid edid */ 6933 if (IS_ERR(intel_connector->edid)) 6934 return NULL; 6935 6936 return drm_edid_duplicate(intel_connector->edid); 6937 } else 6938 return drm_get_edid(&intel_connector->base, 6939 &intel_dp->aux.ddc); 6940 } 6941 6942 static void 6943 intel_dp_update_dfp(struct intel_dp *intel_dp, 6944 const struct edid *edid) 6945 { 6946 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6947 struct intel_connector *connector = intel_dp->attached_connector; 6948 6949 intel_dp->dfp.max_bpc = 6950 drm_dp_downstream_max_bpc(intel_dp->dpcd, 6951 intel_dp->downstream_ports, edid); 6952 6953 intel_dp->dfp.max_dotclock = 6954 drm_dp_downstream_max_dotclock(intel_dp->dpcd, 6955 intel_dp->downstream_ports); 6956 6957 intel_dp->dfp.min_tmds_clock = 6958 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, 6959 intel_dp->downstream_ports, 6960 edid); 6961 intel_dp->dfp.max_tmds_clock = 6962 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, 6963 intel_dp->downstream_ports, 6964 edid); 6965 6966 intel_dp->dfp.pcon_max_frl_bw = 6967 drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd, 6968 intel_dp->downstream_ports); 6969 6970 drm_dbg_kms(&i915->drm, 6971 "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n", 6972 connector->base.base.id, connector->base.name, 6973 intel_dp->dfp.max_bpc, 6974 intel_dp->dfp.max_dotclock, 6975 intel_dp->dfp.min_tmds_clock, 6976 intel_dp->dfp.max_tmds_clock, 6977 intel_dp->dfp.pcon_max_frl_bw); 6978 6979 intel_dp_get_pcon_dsc_cap(intel_dp); 6980 } 6981 6982 static void 6983 intel_dp_update_420(struct intel_dp *intel_dp) 6984 { 6985 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6986 struct intel_connector *connector = intel_dp->attached_connector; 6987 bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr; 6988 6989 /* No YCbCr output support on gmch platforms */ 6990 if (HAS_GMCH(i915)) 6991 return; 6992 6993 /* 6994 * ILK doesn't seem capable of DP YCbCr output. The 6995 * displayed image is severely corrupted. SNB+ is fine.
6996 */ 6997 if (IS_GEN(i915, 5)) 6998 return; 6999 7000 is_branch = drm_dp_is_branch(intel_dp->dpcd); 7001 ycbcr_420_passthrough = 7002 drm_dp_downstream_420_passthrough(intel_dp->dpcd, 7003 intel_dp->downstream_ports); 7004 /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ 7005 ycbcr_444_to_420 = 7006 dp_to_dig_port(intel_dp)->lspcon.active || 7007 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, 7008 intel_dp->downstream_ports); 7009 rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 7010 intel_dp->downstream_ports, 7011 DP_DS_HDMI_BT601_RGB_YCBCR_CONV | 7012 DP_DS_HDMI_BT709_RGB_YCBCR_CONV | 7013 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); 7014 7015 if (INTEL_GEN(i915) >= 11) { 7016 /* Let PCON convert from RGB->YCbCr if possible */ 7017 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) { 7018 intel_dp->dfp.rgb_to_ycbcr = true; 7019 intel_dp->dfp.ycbcr_444_to_420 = true; 7020 connector->base.ycbcr_420_allowed = true; 7021 } else { 7022 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ 7023 intel_dp->dfp.ycbcr_444_to_420 = 7024 ycbcr_444_to_420 && !ycbcr_420_passthrough; 7025 7026 connector->base.ycbcr_420_allowed = 7027 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; 7028 } 7029 } else { 7030 /* 4:4:4->4:2:0 conversion is the only way */ 7031 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; 7032 7033 connector->base.ycbcr_420_allowed = ycbcr_444_to_420; 7034 } 7035 7036 drm_dbg_kms(&i915->drm, 7037 "[CONNECTOR:%d:%s] RGB->YCbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", 7038 connector->base.base.id, connector->base.name, 7039 yesno(intel_dp->dfp.rgb_to_ycbcr), 7040 yesno(connector->base.ycbcr_420_allowed), 7041 yesno(intel_dp->dfp.ycbcr_444_to_420)); 7042 } 7043 7044 static void 7045 intel_dp_set_edid(struct intel_dp *intel_dp) 7046 { 7047 struct intel_connector *connector = intel_dp->attached_connector; 7048 struct edid *edid; 7049 7050 intel_dp_unset_edid(intel_dp); 7051 edid = intel_dp_get_edid(intel_dp); 7052 connector->detect_edid = edid; 7053 7054 intel_dp_update_dfp(intel_dp, edid); 7055 intel_dp_update_420(intel_dp); 7056 7057 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 7058 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 7059 intel_dp->has_audio = drm_detect_monitor_audio(edid); 7060 } 7061 7062 drm_dp_cec_set_edid(&intel_dp->aux, edid); 7063 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 7064 } 7065 7066 static void 7067 intel_dp_unset_edid(struct intel_dp *intel_dp) 7068 { 7069 struct intel_connector *connector = intel_dp->attached_connector; 7070 7071 drm_dp_cec_unset_edid(&intel_dp->aux); 7072 kfree(connector->detect_edid); 7073 connector->detect_edid = NULL; 7074 7075 intel_dp->has_hdmi_sink = false; 7076 intel_dp->has_audio = false; 7077 intel_dp->edid_quirks = 0; 7078 7079 intel_dp->dfp.max_bpc = 0; 7080 intel_dp->dfp.max_dotclock = 0; 7081 intel_dp->dfp.min_tmds_clock = 0; 7082 intel_dp->dfp.max_tmds_clock = 0; 7083 7084 intel_dp->dfp.pcon_max_frl_bw = 0; 7085 7086 intel_dp->dfp.ycbcr_444_to_420 = false; 7087 connector->base.ycbcr_420_allowed = false; 7088 } 7089 7090 static int 7091 intel_dp_detect(struct drm_connector *connector, 7092 struct drm_modeset_acquire_ctx *ctx, 7093 bool force) 7094 { 7095 struct drm_i915_private *dev_priv = to_i915(connector->dev); 7096 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 7097 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7098 struct intel_encoder *encoder
= &dig_port->base; 7099 enum drm_connector_status status; 7100 7101 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 7102 connector->base.id, connector->name); 7103 drm_WARN_ON(&dev_priv->drm, 7104 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 7105 7106 if (!INTEL_DISPLAY_ENABLED(dev_priv)) 7107 return connector_status_disconnected; 7108 7109 /* Can't disconnect eDP */ 7110 if (intel_dp_is_edp(intel_dp)) 7111 status = edp_detect(intel_dp); 7112 else if (intel_digital_port_connected(encoder)) 7113 status = intel_dp_detect_dpcd(intel_dp); 7114 else 7115 status = connector_status_disconnected; 7116 7117 if (status == connector_status_disconnected) { 7118 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 7119 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 7120 7121 if (intel_dp->is_mst) { 7122 drm_dbg_kms(&dev_priv->drm, 7123 "MST device may have disappeared %d vs %d\n", 7124 intel_dp->is_mst, 7125 intel_dp->mst_mgr.mst_state); 7126 intel_dp->is_mst = false; 7127 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 7128 intel_dp->is_mst); 7129 } 7130 7131 goto out; 7132 } 7133 7134 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 7135 if (INTEL_GEN(dev_priv) >= 11) 7136 intel_dp_get_dsc_sink_cap(intel_dp); 7137 7138 intel_dp_configure_mst(intel_dp); 7139 7140 /* 7141 * TODO: Reset link params when switching to MST mode, until MST 7142 * supports link training fallback params. 7143 */ 7144 if (intel_dp->reset_link_params || intel_dp->is_mst) { 7145 /* Initial max link lane count */ 7146 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 7147 7148 /* Initial max link rate */ 7149 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 7150 7151 intel_dp->reset_link_params = false; 7152 } 7153 7154 intel_dp_print_rates(intel_dp); 7155 7156 if (intel_dp->is_mst) { 7157 /* 7158 * If we are in MST mode then this connector 7159 * won't report as connected and won't have 7160 * an EDID attached to it. 7161 */ 7162 status = connector_status_disconnected; 7163 goto out; 7164 } 7165 7166 /* 7167 * Some external monitors do not signal loss of link synchronization 7168 * with an IRQ_HPD, so force a link status check. 7169 */ 7170 if (!intel_dp_is_edp(intel_dp)) { 7171 int ret; 7172 7173 ret = intel_dp_retrain_link(encoder, ctx); 7174 if (ret) 7175 return ret; 7176 } 7177 7178 /* 7179 * Clear the NACK and defer counts so we get their exact values 7180 * while reading the EDID; Compliance tests 4.2.2.4 and 4.2.2.5 7181 * require them. 7182 */ 7183 intel_dp->aux.i2c_nack_count = 0; 7184 intel_dp->aux.i2c_defer_count = 0; 7185 7186 intel_dp_set_edid(intel_dp); 7187 if (intel_dp_is_edp(intel_dp) || 7188 to_intel_connector(connector)->detect_edid) 7189 status = connector_status_connected; 7190 7191 intel_dp_check_device_service_irq(intel_dp); 7192 7193 out: 7194 if (status != connector_status_connected && !intel_dp->is_mst) 7195 intel_dp_unset_edid(intel_dp); 7196 7197 /* 7198 * Make sure the refs for power wells enabled during detect are 7199 * dropped to avoid a new detect cycle triggered by HPD polling.
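* intel_display_power_flush_work() below does exactly that: it flushes
* the pending async power-domain puts so no stray wakeref keeps HPD
* polling alive.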
7200 */ 7201 intel_display_power_flush_work(dev_priv); 7202 7203 if (!intel_dp_is_edp(intel_dp)) 7204 drm_dp_set_subconnector_property(connector, 7205 status, 7206 intel_dp->dpcd, 7207 intel_dp->downstream_ports); 7208 return status; 7209 } 7210 7211 static void 7212 intel_dp_force(struct drm_connector *connector) 7213 { 7214 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 7215 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7216 struct intel_encoder *intel_encoder = &dig_port->base; 7217 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 7218 enum intel_display_power_domain aux_domain = 7219 intel_aux_power_domain(dig_port); 7220 intel_wakeref_t wakeref; 7221 7222 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 7223 connector->base.id, connector->name); 7224 intel_dp_unset_edid(intel_dp); 7225 7226 if (connector->status != connector_status_connected) 7227 return; 7228 7229 wakeref = intel_display_power_get(dev_priv, aux_domain); 7230 7231 intel_dp_set_edid(intel_dp); 7232 7233 intel_display_power_put(dev_priv, aux_domain, wakeref); 7234 } 7235 7236 static int intel_dp_get_modes(struct drm_connector *connector) 7237 { 7238 struct intel_connector *intel_connector = to_intel_connector(connector); 7239 struct edid *edid; 7240 7241 edid = intel_connector->detect_edid; 7242 if (edid) { 7243 int ret = intel_connector_update_modes(connector, edid); 7244 if (ret) 7245 return ret; 7246 } 7247 7248 /* if eDP has no EDID, fall back to fixed mode */ 7249 if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && 7250 intel_connector->panel.fixed_mode) { 7251 struct drm_display_mode *mode; 7252 7253 mode = drm_mode_duplicate(connector->dev, 7254 intel_connector->panel.fixed_mode); 7255 if (mode) { 7256 drm_mode_probed_add(connector, mode); 7257 return 1; 7258 } 7259 } 7260 7261 if (!edid) { 7262 struct intel_dp *intel_dp = intel_attached_dp(intel_connector); 7263 struct drm_display_mode *mode; 7264 7265 mode = drm_dp_downstream_mode(connector->dev, 7266 intel_dp->dpcd, 7267 intel_dp->downstream_ports); 7268 if (mode) { 7269 drm_mode_probed_add(connector, mode); 7270 return 1; 7271 } 7272 } 7273 7274 return 0; 7275 } 7276 7277 static int 7278 intel_dp_connector_register(struct drm_connector *connector) 7279 { 7280 struct drm_i915_private *i915 = to_i915(connector->dev); 7281 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 7282 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7283 struct intel_lspcon *lspcon = &dig_port->lspcon; 7284 int ret; 7285 7286 ret = intel_connector_register(connector); 7287 if (ret) 7288 return ret; 7289 7290 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 7291 intel_dp->aux.name, connector->kdev->kobj.name); 7292 7293 intel_dp->aux.dev = connector->kdev; 7294 ret = drm_dp_aux_register(&intel_dp->aux); 7295 if (!ret) 7296 drm_dp_cec_register_connector(&intel_dp->aux, connector); 7297 7298 if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) 7299 return ret; 7300 7301 /* 7302 * ToDo: Clean this up to handle lspcon init and resume in a 7303 * more efficient and streamlined manner.
7304 */ 7305 if (lspcon_init(dig_port)) { 7306 lspcon_detect_hdr_capability(lspcon); 7307 if (lspcon->hdr_supported) 7308 drm_object_attach_property(&connector->base, 7309 connector->dev->mode_config.hdr_output_metadata_property, 7310 0); 7311 } 7312 7313 return ret; 7314 } 7315 7316 static void 7317 intel_dp_connector_unregister(struct drm_connector *connector) 7318 { 7319 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 7320 7321 drm_dp_cec_unregister_connector(&intel_dp->aux); 7322 drm_dp_aux_unregister(&intel_dp->aux); 7323 intel_connector_unregister(connector); 7324 } 7325 7326 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 7327 { 7328 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 7329 struct intel_dp *intel_dp = &dig_port->dp; 7330 7331 intel_dp_mst_encoder_cleanup(dig_port); 7332 if (intel_dp_is_edp(intel_dp)) { 7333 intel_wakeref_t wakeref; 7334 7335 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 7336 /* 7337 * vdd might still be enabled due to the delayed vdd off. 7338 * Make sure vdd is actually turned off here. 7339 */ 7340 with_pps_lock(intel_dp, wakeref) 7341 edp_panel_vdd_off_sync(intel_dp); 7342 } 7343 7344 intel_dp_aux_fini(intel_dp); 7345 } 7346 7347 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 7348 { 7349 intel_dp_encoder_flush_work(encoder); 7350 7351 drm_encoder_cleanup(encoder); 7352 kfree(enc_to_dig_port(to_intel_encoder(encoder))); 7353 } 7354 7355 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 7356 { 7357 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 7358 intel_wakeref_t wakeref; 7359 7360 if (!intel_dp_is_edp(intel_dp)) 7361 return; 7362 7363 /* 7364 * vdd might still be enabled due to the delayed vdd off. 7365 * Make sure vdd is actually turned off here. 7366 */ 7367 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 7368 with_pps_lock(intel_dp, wakeref) 7369 edp_panel_vdd_off_sync(intel_dp); 7370 } 7371 7372 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) 7373 { 7374 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 7375 intel_wakeref_t wakeref; 7376 7377 if (!intel_dp_is_edp(intel_dp)) 7378 return; 7379 7380 with_pps_lock(intel_dp, wakeref) 7381 wait_panel_power_cycle(intel_dp); 7382 } 7383 7384 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 7385 { 7386 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7387 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7388 7389 lockdep_assert_held(&dev_priv->pps_mutex); 7390 7391 if (!edp_have_panel_vdd(intel_dp)) 7392 return; 7393 7394 /* 7395 * The VDD bit needs a power domain reference, so if the bit is 7396 * already enabled when we boot or resume, grab this reference and 7397 * schedule a vdd off, so we don't hold on to the reference 7398 * indefinitely.
7399 */ 7400 drm_dbg_kms(&dev_priv->drm, 7401 "VDD left on by BIOS, adjusting state tracking\n"); 7402 drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref); 7403 intel_dp->vdd_wakeref = intel_display_power_get(dev_priv, 7404 intel_aux_power_domain(dig_port)); 7405 7406 edp_panel_vdd_schedule_off(intel_dp); 7407 } 7408 7409 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 7410 { 7411 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7412 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 7413 enum pipe pipe; 7414 7415 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 7416 encoder->port, &pipe)) 7417 return pipe; 7418 7419 return INVALID_PIPE; 7420 } 7421 7422 void intel_dp_encoder_reset(struct drm_encoder *encoder) 7423 { 7424 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 7425 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 7426 intel_wakeref_t wakeref; 7427 7428 if (!HAS_DDI(dev_priv)) 7429 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 7430 7431 intel_dp->reset_link_params = true; 7432 7433 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 7434 !intel_dp_is_edp(intel_dp)) 7435 return; 7436 7437 with_pps_lock(intel_dp, wakeref) { 7438 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7439 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 7440 7441 if (intel_dp_is_edp(intel_dp)) { 7442 /* 7443 * Reinit the power sequencer, in case BIOS did 7444 * something nasty with it. 7445 */ 7446 intel_dp_pps_init(intel_dp); 7447 intel_edp_panel_vdd_sanitize(intel_dp); 7448 } 7449 } 7450 } 7451 7452 static int intel_modeset_tile_group(struct intel_atomic_state *state, 7453 int tile_group_id) 7454 { 7455 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7456 struct drm_connector_list_iter conn_iter; 7457 struct drm_connector *connector; 7458 int ret = 0; 7459 7460 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 7461 drm_for_each_connector_iter(connector, &conn_iter) { 7462 struct drm_connector_state *conn_state; 7463 struct intel_crtc_state *crtc_state; 7464 struct intel_crtc *crtc; 7465 7466 if (!connector->has_tile || 7467 connector->tile_group->id != tile_group_id) 7468 continue; 7469 7470 conn_state = drm_atomic_get_connector_state(&state->base, 7471 connector); 7472 if (IS_ERR(conn_state)) { 7473 ret = PTR_ERR(conn_state); 7474 break; 7475 } 7476 7477 crtc = to_intel_crtc(conn_state->crtc); 7478 7479 if (!crtc) 7480 continue; 7481 7482 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7483 crtc_state->uapi.mode_changed = true; 7484 7485 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7486 if (ret) 7487 break; 7488 } 7489 drm_connector_list_iter_end(&conn_iter); 7490 7491 return ret; 7492 } 7493 7494 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 7495 { 7496 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7497 struct intel_crtc *crtc; 7498 7499 if (transcoders == 0) 7500 return 0; 7501 7502 for_each_intel_crtc(&dev_priv->drm, crtc) { 7503 struct intel_crtc_state *crtc_state; 7504 int ret; 7505 7506 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7507 if (IS_ERR(crtc_state)) 7508 return PTR_ERR(crtc_state); 7509 7510 if (!crtc_state->hw.enable) 7511 continue; 7512 7513 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 7514 continue; 7515 7516 crtc_state->uapi.mode_changed = true; 7517 7518 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 7519 if 
(ret) 7520 return ret; 7521 7522 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7523 if (ret) 7524 return ret; 7525 7526 transcoders &= ~BIT(crtc_state->cpu_transcoder); 7527 } 7528 7529 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 7530 7531 return 0; 7532 } 7533 7534 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 7535 struct drm_connector *connector) 7536 { 7537 const struct drm_connector_state *old_conn_state = 7538 drm_atomic_get_old_connector_state(&state->base, connector); 7539 const struct intel_crtc_state *old_crtc_state; 7540 struct intel_crtc *crtc; 7541 u8 transcoders; 7542 7543 crtc = to_intel_crtc(old_conn_state->crtc); 7544 if (!crtc) 7545 return 0; 7546 7547 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 7548 7549 if (!old_crtc_state->hw.active) 7550 return 0; 7551 7552 transcoders = old_crtc_state->sync_mode_slaves_mask; 7553 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 7554 transcoders |= BIT(old_crtc_state->master_transcoder); 7555 7556 return intel_modeset_affected_transcoders(state, 7557 transcoders); 7558 } 7559 7560 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 7561 struct drm_atomic_state *_state) 7562 { 7563 struct drm_i915_private *dev_priv = to_i915(conn->dev); 7564 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7565 int ret; 7566 7567 ret = intel_digital_connector_atomic_check(conn, &state->base); 7568 if (ret) 7569 return ret; 7570 7571 /* 7572 * We don't enable port sync on BDW due to missing w/as and 7573 * due to not having adjusted the modeset sequence appropriately. 7574 */ 7575 if (INTEL_GEN(dev_priv) < 9) 7576 return 0; 7577 7578 if (!intel_connector_needs_modeset(state, conn)) 7579 return 0; 7580 7581 if (conn->has_tile) { 7582 ret = intel_modeset_tile_group(state, conn->tile_group->id); 7583 if (ret) 7584 return ret; 7585 } 7586 7587 return intel_modeset_synced_crtcs(state, conn); 7588 } 7589 7590 static const struct drm_connector_funcs intel_dp_connector_funcs = { 7591 .force = intel_dp_force, 7592 .fill_modes = drm_helper_probe_single_connector_modes, 7593 .atomic_get_property = intel_digital_connector_atomic_get_property, 7594 .atomic_set_property = intel_digital_connector_atomic_set_property, 7595 .late_register = intel_dp_connector_register, 7596 .early_unregister = intel_dp_connector_unregister, 7597 .destroy = intel_connector_destroy, 7598 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7599 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 7600 }; 7601 7602 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 7603 .detect_ctx = intel_dp_detect, 7604 .get_modes = intel_dp_get_modes, 7605 .mode_valid = intel_dp_mode_valid, 7606 .atomic_check = intel_dp_connector_atomic_check, 7607 }; 7608 7609 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 7610 .reset = intel_dp_encoder_reset, 7611 .destroy = intel_dp_encoder_destroy, 7612 }; 7613 7614 static bool intel_edp_have_power(struct intel_dp *intel_dp) 7615 { 7616 intel_wakeref_t wakeref; 7617 bool have_power = false; 7618 7619 with_pps_lock(intel_dp, wakeref) { 7620 have_power = edp_have_panel_power(intel_dp) && 7621 edp_have_panel_vdd(intel_dp); 7622 } 7623 7624 return have_power; 7625 } 7626 7627 enum irqreturn 7628 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 7629 { 7630 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 7631 struct intel_dp *intel_dp = 
&dig_port->dp; 7632 7633 if (dig_port->base.type == INTEL_OUTPUT_EDP && 7634 (long_hpd || !intel_edp_have_power(intel_dp))) { 7635 /* 7636 * vdd off can generate a long/short pulse on eDP which 7637 * would require vdd on to handle it, and thus we 7638 * would end up in an endless cycle of 7639 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 7640 */ 7641 drm_dbg_kms(&i915->drm, 7642 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 7643 long_hpd ? "long" : "short", 7644 dig_port->base.base.base.id, 7645 dig_port->base.base.name); 7646 return IRQ_HANDLED; 7647 } 7648 7649 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 7650 dig_port->base.base.base.id, 7651 dig_port->base.base.name, 7652 long_hpd ? "long" : "short"); 7653 7654 if (long_hpd) { 7655 intel_dp->reset_link_params = true; 7656 return IRQ_NONE; 7657 } 7658 7659 if (intel_dp->is_mst) { 7660 if (!intel_dp_check_mst_status(intel_dp)) 7661 return IRQ_NONE; 7662 } else if (!intel_dp_short_pulse(intel_dp)) { 7663 return IRQ_NONE; 7664 } 7665 7666 return IRQ_HANDLED; 7667 } 7668 7669 /* check the VBT to see whether the eDP is on another port */ 7670 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 7671 { 7672 /* 7673 * eDP is not supported on g4x, so bail out early just for 7674 * a bit of extra safety in case the VBT is bonkers. 7675 */ 7676 if (INTEL_GEN(dev_priv) < 5) 7677 return false; 7678 7679 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 7680 return true; 7681 7682 return intel_bios_is_port_edp(dev_priv, port); 7683 } 7684 7685 static void 7686 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 7687 { 7688 struct drm_i915_private *dev_priv = to_i915(connector->dev); 7689 enum port port = dp_to_dig_port(intel_dp)->base.port; 7690 7691 if (!intel_dp_is_edp(intel_dp)) 7692 drm_connector_attach_dp_subconnector_property(connector); 7693 7694 if (!IS_G4X(dev_priv) && port != PORT_A) 7695 intel_attach_force_audio_property(connector); 7696 7697 intel_attach_broadcast_rgb_property(connector); 7698 if (HAS_GMCH(dev_priv)) 7699 drm_connector_attach_max_bpc_property(connector, 6, 10); 7700 else if (INTEL_GEN(dev_priv) >= 5) 7701 drm_connector_attach_max_bpc_property(connector, 6, 12); 7702 7703 /* Register HDMI colorspace for case of lspcon */ 7704 if (intel_bios_is_lspcon_present(dev_priv, port)) { 7705 drm_connector_attach_content_type_property(connector); 7706 intel_attach_hdmi_colorspace_property(connector); 7707 } else { 7708 intel_attach_dp_colorspace_property(connector); 7709 } 7710 7711 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 7712 drm_object_attach_property(&connector->base, 7713 connector->dev->mode_config.hdr_output_metadata_property, 7714 0); 7715 7716 if (intel_dp_is_edp(intel_dp)) { 7717 u32 allowed_scalers; 7718 7719 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 7720 if (!HAS_GMCH(dev_priv)) 7721 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 7722 7723 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 7724 7725 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 7726 7727 } 7728 } 7729 7730 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 7731 { 7732 intel_dp->panel_power_off_time = ktime_get_boottime(); 7733 intel_dp->last_power_on = jiffies; 7734 intel_dp->last_backlight_off = jiffies; 7735 } 7736 7737 static void 7738 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 7739 { 7740 struct drm_i915_private
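intel_dp_add_properties() wires standard KMS properties onto the connector with core DRM helpers. As a hedged illustration of the same pattern for a generic connector (the helper below is hypothetical; the two drm_connector_attach_* calls are the real core APIs used above):

	/* Hypothetical helper showing the property-attach pattern used above:
	 * expose a max bpc range and a scaling mode set on a connector. */
	static void example_attach_props(struct drm_connector *connector)
	{
		/* Clamp "max bpc" to 6..12, as on non-GMCH platforms above. */
		drm_connector_attach_max_bpc_property(connector, 6, 12);

		/* Offer aspect-preserving and fullscreen panel scaling. */
		drm_connector_attach_scaling_mode_property(connector,
							   BIT(DRM_MODE_SCALE_ASPECT) |
							   BIT(DRM_MODE_SCALE_FULLSCREEN));
	}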
*dev_priv = dp_to_i915(intel_dp); 7741 u32 pp_on, pp_off, pp_ctl; 7742 struct pps_registers regs; 7743 7744 intel_pps_get_registers(intel_dp, &regs); 7745 7746 pp_ctl = ilk_get_pp_control(intel_dp); 7747 7748 /* Ensure PPS is unlocked */ 7749 if (!HAS_DDI(dev_priv)) 7750 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7751 7752 pp_on = intel_de_read(dev_priv, regs.pp_on); 7753 pp_off = intel_de_read(dev_priv, regs.pp_off); 7754 7755 /* Pull timing values out of registers */ 7756 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 7757 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 7758 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 7759 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 7760 7761 if (i915_mmio_reg_valid(regs.pp_div)) { 7762 u32 pp_div; 7763 7764 pp_div = intel_de_read(dev_priv, regs.pp_div); 7765 7766 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 7767 } else { 7768 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 7769 } 7770 } 7771 7772 static void 7773 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 7774 { 7775 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 7776 state_name, 7777 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 7778 } 7779 7780 static void 7781 intel_pps_verify_state(struct intel_dp *intel_dp) 7782 { 7783 struct edp_power_seq hw; 7784 struct edp_power_seq *sw = &intel_dp->pps_delays; 7785 7786 intel_pps_readout_hw_state(intel_dp, &hw); 7787 7788 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 7789 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 7790 DRM_ERROR("PPS state mismatch\n"); 7791 intel_pps_dump_state("sw", sw); 7792 intel_pps_dump_state("hw", &hw); 7793 } 7794 } 7795 7796 static void 7797 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 7798 { 7799 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7800 struct edp_power_seq cur, vbt, spec, 7801 *final = &intel_dp->pps_delays; 7802 7803 lockdep_assert_held(&dev_priv->pps_mutex); 7804 7805 /* already initialized? */ 7806 if (final->t11_t12 != 0) 7807 return; 7808 7809 intel_pps_readout_hw_state(intel_dp, &cur); 7810 7811 intel_pps_dump_state("cur", &cur); 7812 7813 vbt = dev_priv->vbt.edp.pps; 7814 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay 7815 * of 500ms appears to be too short. Occasionally the panel 7816 * just fails to power back on. Increasing the delay to 800ms 7817 * seems sufficient to avoid this problem. 7818 */ 7819 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 7820 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 7821 drm_dbg_kms(&dev_priv->drm, 7822 "Increasing T12 panel delay as per the quirk to %d\n", 7823 vbt.t11_t12); 7824 } 7825 /* T11_T12 delay is special and actually in units of 100ms, but zero 7826 * based in the hw (so we need to add 100 ms). But the sw vbt 7827 * table multiplies it by 1000 to make it in units of 100usec, 7828 * too. */ 7829 vbt.t11_t12 += 100 * 10; 7830 7831 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 7832 * our hw here, which are all in 100usec. */ 7833 spec.t1_t3 = 210 * 10; 7834 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 7835 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 7836 spec.t10 = 500 * 10; 7837 /* This one is special and actually in units of 100ms, but zero 7838 * based in the hw (so we need to add 100 ms).
But the sw vbt 7839 * table multiplies it by 1000 to make it in units of 100usec, 7840 * too. */ 7841 spec.t11_t12 = (510 + 100) * 10; 7842 7843 intel_pps_dump_state("vbt", &vbt); 7844 7845 /* Use the max of the register settings and vbt. If both are 7846 * unset, fall back to the spec limits. */ 7847 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ 7848 spec.field : \ 7849 max(cur.field, vbt.field)) 7850 assign_final(t1_t3); 7851 assign_final(t8); 7852 assign_final(t9); 7853 assign_final(t10); 7854 assign_final(t11_t12); 7855 #undef assign_final 7856 7857 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 7858 intel_dp->panel_power_up_delay = get_delay(t1_t3); 7859 intel_dp->backlight_on_delay = get_delay(t8); 7860 intel_dp->backlight_off_delay = get_delay(t9); 7861 intel_dp->panel_power_down_delay = get_delay(t10); 7862 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 7863 #undef get_delay 7864 7865 drm_dbg_kms(&dev_priv->drm, 7866 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 7867 intel_dp->panel_power_up_delay, 7868 intel_dp->panel_power_down_delay, 7869 intel_dp->panel_power_cycle_delay); 7870 7871 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", 7872 intel_dp->backlight_on_delay, 7873 intel_dp->backlight_off_delay); 7874 7875 /* 7876 * We override the HW backlight delays to 1 because we do manual waits 7877 * on them. For T8, even BSpec recommends doing it. For T9, if we 7878 * don't do this, we'll end up waiting for the backlight off delay 7879 * twice: once when we do the manual sleep, and once when we disable 7880 * the panel and wait for the PP_STATUS bit to become zero. 7881 */ 7882 final->t8 = 1; 7883 final->t9 = 1; 7884 7885 /* 7886 * HW has only a 100msec granularity for t11_t12 so round it up 7887 * accordingly. 7888 */ 7889 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 7890 } 7891 7892 static void 7893 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 7894 bool force_disable_vdd) 7895 { 7896 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7897 u32 pp_on, pp_off, port_sel = 0; 7898 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; 7899 struct pps_registers regs; 7900 enum port port = dp_to_dig_port(intel_dp)->base.port; 7901 const struct edp_power_seq *seq = &intel_dp->pps_delays; 7902 7903 lockdep_assert_held(&dev_priv->pps_mutex); 7904 7905 intel_pps_get_registers(intel_dp, &regs); 7906 7907 /* 7908 * On some VLV machines the BIOS can leave the VDD 7909 * enabled even on power sequencers which aren't 7910 * hooked up to any port. This would mess up the 7911 * power domain tracking the first time we pick 7912 * one of these power sequencers for use since 7913 * edp_panel_vdd_on() would notice that the VDD was 7914 * already on and therefore wouldn't grab the power 7915 * domain reference. Disable VDD first to avoid this. 7916 * This also avoids spuriously turning the VDD on as 7917 * soon as the new power sequencer gets initialized.
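All the PPS delays above are kept in the hardware's 100 us units, which is why get_delay() divides by 10 to obtain milliseconds and why t11_t12 is rounded up to 1000-unit (100 ms) steps. A standalone arithmetic check with illustrative values (not taken from any real panel):

	#include <stdio.h>

	/* Mirror of the kernel helpers, for a self-contained illustration. */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define roundup(x, y)		(DIV_ROUND_UP(x, y) * (y))

	int main(void)
	{
		unsigned int t1_t3 = 2100;   /* 100us units -> 210 ms power up */
		unsigned int t11_t12 = 5050; /* 100us units -> 505 ms power cycle */

		printf("power up delay  = %u ms\n", DIV_ROUND_UP(t1_t3, 10));
		/* hw granularity for t11_t12 is 100 ms, so 5050 rounds to 6000 */
		printf("t11_t12 rounded = %u (100us units) = %u ms\n",
		       roundup(t11_t12, 100 * 10),
		       roundup(t11_t12, 100 * 10) / 10);
		return 0;
	}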
7918 */ 7919 if (force_disable_vdd) { 7920 u32 pp = ilk_get_pp_control(intel_dp); 7921 7922 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, 7923 "Panel power already on\n"); 7924 7925 if (pp & EDP_FORCE_VDD) 7926 drm_dbg_kms(&dev_priv->drm, 7927 "VDD already on, disabling first\n"); 7928 7929 pp &= ~EDP_FORCE_VDD; 7930 7931 intel_de_write(dev_priv, regs.pp_ctrl, pp); 7932 } 7933 7934 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 7935 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 7936 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 7937 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 7938 7939 /* Haswell doesn't have any port selection bits for the panel 7940 * power sequencer any more. */ 7941 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7942 port_sel = PANEL_PORT_SELECT_VLV(port); 7943 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7944 switch (port) { 7945 case PORT_A: 7946 port_sel = PANEL_PORT_SELECT_DPA; 7947 break; 7948 case PORT_C: 7949 port_sel = PANEL_PORT_SELECT_DPC; 7950 break; 7951 case PORT_D: 7952 port_sel = PANEL_PORT_SELECT_DPD; 7953 break; 7954 default: 7955 MISSING_CASE(port); 7956 break; 7957 } 7958 } 7959 7960 pp_on |= port_sel; 7961 7962 intel_de_write(dev_priv, regs.pp_on, pp_on); 7963 intel_de_write(dev_priv, regs.pp_off, pp_off); 7964 7965 /* 7966 * Compute the divisor for the pp clock, simply match the Bspec formula. 7967 */ 7968 if (i915_mmio_reg_valid(regs.pp_div)) { 7969 intel_de_write(dev_priv, regs.pp_div, 7970 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 7971 } else { 7972 u32 pp_ctl; 7973 7974 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 7975 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 7976 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 7977 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7978 } 7979 7980 drm_dbg_kms(&dev_priv->drm, 7981 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 7982 intel_de_read(dev_priv, regs.pp_on), 7983 intel_de_read(dev_priv, regs.pp_off), 7984 i915_mmio_reg_valid(regs.pp_div) ? 7985 intel_de_read(dev_priv, regs.pp_div) : 7986 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 7987 } 7988 7989 static void intel_dp_pps_init(struct intel_dp *intel_dp) 7990 { 7991 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7992 7993 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7994 vlv_initial_power_sequencer_setup(intel_dp); 7995 } else { 7996 intel_dp_init_panel_power_sequencer(intel_dp); 7997 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 7998 } 7999 } 8000 8001 /** 8002 * intel_dp_set_drrs_state - program registers for RR switch to take effect 8003 * @dev_priv: i915 device 8004 * @crtc_state: a pointer to the active intel_crtc_state 8005 * @refresh_rate: RR to be programmed 8006 * 8007 * This function gets called when refresh rate (RR) has to be changed from 8008 * one frequency to another. Switches can be between high and low RR 8009 * supported by the panel or to any other RR based on media playback (in 8010 * this case, RR value needs to be passed from user space). 8011 * 8012 * The caller of this function needs to take a lock on dev_priv->drrs. 
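The pp_div programming above follows the Bspec formula directly: div is the raw clock in MHz, the reference divider field stores (100 * div) / 2 - 1, and the power cycle delay field counts 100 ms steps. A quick numeric sketch, assuming an example 24 MHz raw clock:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int rawclk_freq = 24000;      /* kHz; assumed example value */
		int div = rawclk_freq / 1000; /* 24 */
		int t11_t12 = 6000;           /* 100us units, i.e. 600 ms */

		/* PP_REFERENCE_DIVIDER field: (100 * 24) / 2 - 1 = 1199 */
		printf("reference divider = %d\n", (100 * div) / 2 - 1);
		/* PANEL_POWER_CYCLE_DELAY field in 100 ms steps: 6 */
		printf("power cycle field = %d\n", DIV_ROUND_UP(t11_t12, 1000));
		return 0;
	}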
8013 */ 8014 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 8015 const struct intel_crtc_state *crtc_state, 8016 int refresh_rate) 8017 { 8018 struct intel_dp *intel_dp = dev_priv->drrs.dp; 8019 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 8020 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 8021 8022 if (refresh_rate <= 0) { 8023 drm_dbg_kms(&dev_priv->drm, 8024 "Refresh rate should be a positive non-zero value.\n"); 8025 return; 8026 } 8027 8028 if (intel_dp == NULL) { 8029 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 8030 return; 8031 } 8032 8033 if (!intel_crtc) { 8034 drm_dbg_kms(&dev_priv->drm, 8035 "DRRS: intel_crtc not initialized\n"); 8036 return; 8037 } 8038 8039 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 8040 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 8041 return; 8042 } 8043 8044 if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == 8045 refresh_rate) 8046 index = DRRS_LOW_RR; 8047 8048 if (index == dev_priv->drrs.refresh_rate_type) { 8049 drm_dbg_kms(&dev_priv->drm, 8050 "DRRS requested for previously set RR...ignoring\n"); 8051 return; 8052 } 8053 8054 if (!crtc_state->hw.active) { 8055 drm_dbg_kms(&dev_priv->drm, 8056 "eDP encoder disabled. CRTC not Active\n"); 8057 return; 8058 } 8059 8060 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 8061 switch (index) { 8062 case DRRS_HIGH_RR: 8063 intel_dp_set_m_n(crtc_state, M1_N1); 8064 break; 8065 case DRRS_LOW_RR: 8066 intel_dp_set_m_n(crtc_state, M2_N2); 8067 break; 8068 case DRRS_MAX_RR: 8069 default: 8070 drm_err(&dev_priv->drm, 8071 "Unsupported refresh rate type\n"); 8072 } 8073 } else if (INTEL_GEN(dev_priv) > 6) { 8074 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 8075 u32 val; 8076 8077 val = intel_de_read(dev_priv, reg); 8078 if (index > DRRS_HIGH_RR) { 8079 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 8080 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 8081 else 8082 val |= PIPECONF_EDP_RR_MODE_SWITCH; 8083 } else { 8084 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 8085 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 8086 else 8087 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 8088 } 8089 intel_de_write(dev_priv, reg, val); 8090 } 8091 8092 dev_priv->drrs.refresh_rate_type = index; 8093 8094 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to: %dHz\n", 8095 refresh_rate); 8096 } 8097 8098 static void 8099 intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) 8100 { 8101 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8102 8103 dev_priv->drrs.busy_frontbuffer_bits = 0; 8104 dev_priv->drrs.dp = intel_dp; 8105 } 8106 8107 /** 8108 * intel_edp_drrs_enable - init drrs struct if supported 8109 * @intel_dp: DP struct 8110 * @crtc_state: A pointer to the active crtc state.
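Callers select the low refresh rate simply by passing the vrefresh of the panel's downclock mode, which is what makes the index comparison above resolve to DRRS_LOW_RR. A hedged sketch of that calling convention (the helper name is hypothetical; the downclock work item later in this file does effectively the same thing):

	/* Sketch of how a caller requests the downclocked refresh rate;
	 * mirrors the calls made by the DRRS work/invalidate paths below. */
	static void drrs_request_low_rr_sketch(struct drm_i915_private *dev_priv,
					       struct intel_dp *intel_dp,
					       const struct intel_crtc_state *crtc_state)
	{
		int low_rr = drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode);

		/* DRRS_LOW_RR is chosen because the rate matches downclock_mode */
		intel_dp_set_drrs_state(dev_priv, crtc_state, low_rr);
	}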
8111 * 8112 * Initializes frontbuffer_bits and drrs.dp 8113 */ 8114 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 8115 const struct intel_crtc_state *crtc_state) 8116 { 8117 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8118 8119 if (!crtc_state->has_drrs) 8120 return; 8121 8122 drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); 8123 8124 mutex_lock(&dev_priv->drrs.mutex); 8125 8126 if (dev_priv->drrs.dp) { 8127 drm_warn(&dev_priv->drm, "DRRS already enabled\n"); 8128 goto unlock; 8129 } 8130 8131 intel_edp_drrs_enable_locked(intel_dp); 8132 8133 unlock: 8134 mutex_unlock(&dev_priv->drrs.mutex); 8135 } 8136 8137 static void 8138 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, 8139 const struct intel_crtc_state *crtc_state) 8140 { 8141 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8142 8143 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { 8144 int refresh; 8145 8146 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); 8147 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); 8148 } 8149 8150 dev_priv->drrs.dp = NULL; 8151 } 8152 8153 /** 8154 * intel_edp_drrs_disable - Disable DRRS 8155 * @intel_dp: DP struct 8156 * @old_crtc_state: Pointer to old crtc_state. 8157 * 8158 */ 8159 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 8160 const struct intel_crtc_state *old_crtc_state) 8161 { 8162 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8163 8164 if (!old_crtc_state->has_drrs) 8165 return; 8166 8167 mutex_lock(&dev_priv->drrs.mutex); 8168 if (!dev_priv->drrs.dp) { 8169 mutex_unlock(&dev_priv->drrs.mutex); 8170 return; 8171 } 8172 8173 intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); 8174 mutex_unlock(&dev_priv->drrs.mutex); 8175 8176 cancel_delayed_work_sync(&dev_priv->drrs.work); 8177 } 8178 8179 /** 8180 * intel_edp_drrs_update - Update DRRS state 8181 * @intel_dp: Intel DP 8182 * @crtc_state: new CRTC state 8183 * 8184 * This function will update DRRS states, disabling or enabling DRRS when 8185 * executing fastsets. For full modeset, intel_edp_drrs_disable() and 8186 * intel_edp_drrs_enable() should be called instead. 8187 */ 8188 void 8189 intel_edp_drrs_update(struct intel_dp *intel_dp, 8190 const struct intel_crtc_state *crtc_state) 8191 { 8192 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8193 8194 if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) 8195 return; 8196 8197 mutex_lock(&dev_priv->drrs.mutex); 8198 8199 /* New state matches current one? */ 8200 if (crtc_state->has_drrs == !!dev_priv->drrs.dp) 8201 goto unlock; 8202 8203 if (crtc_state->has_drrs) 8204 intel_edp_drrs_enable_locked(intel_dp); 8205 else 8206 intel_edp_drrs_disable_locked(intel_dp, crtc_state); 8207 8208 unlock: 8209 mutex_unlock(&dev_priv->drrs.mutex); 8210 } 8211 8212 static void intel_edp_drrs_downclock_work(struct work_struct *work) 8213 { 8214 struct drm_i915_private *dev_priv = 8215 container_of(work, typeof(*dev_priv), drrs.work.work); 8216 struct intel_dp *intel_dp; 8217 8218 mutex_lock(&dev_priv->drrs.mutex); 8219 8220 intel_dp = dev_priv->drrs.dp; 8221 8222 if (!intel_dp) 8223 goto unlock; 8224 8225 /* 8226 * The delayed work can race with an invalidate hence we need to 8227 * recheck. 
8228 */ 8229 8230 if (dev_priv->drrs.busy_frontbuffer_bits) 8231 goto unlock; 8232 8233 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 8234 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 8235 8236 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 8237 drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); 8238 } 8239 8240 unlock: 8241 mutex_unlock(&dev_priv->drrs.mutex); 8242 } 8243 8244 /** 8245 * intel_edp_drrs_invalidate - Disable Idleness DRRS 8246 * @dev_priv: i915 device 8247 * @frontbuffer_bits: frontbuffer plane tracking bits 8248 * 8249 * This function gets called every time rendering on the given planes starts. 8250 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR). 8251 * 8252 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 8253 */ 8254 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 8255 unsigned int frontbuffer_bits) 8256 { 8257 struct intel_dp *intel_dp; 8258 struct drm_crtc *crtc; 8259 enum pipe pipe; 8260 8261 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 8262 return; 8263 8264 cancel_delayed_work(&dev_priv->drrs.work); 8265 8266 mutex_lock(&dev_priv->drrs.mutex); 8267 8268 intel_dp = dev_priv->drrs.dp; 8269 if (!intel_dp) { 8270 mutex_unlock(&dev_priv->drrs.mutex); 8271 return; 8272 } 8273 8274 crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 8275 pipe = to_intel_crtc(crtc)->pipe; 8276 8277 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 8278 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 8279 8280 /* invalidate means busy screen hence upclock */ 8281 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 8282 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 8283 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); 8284 8285 mutex_unlock(&dev_priv->drrs.mutex); 8286 } 8287 8288 /** 8289 * intel_edp_drrs_flush - Restart Idleness DRRS 8290 * @dev_priv: i915 device 8291 * @frontbuffer_bits: frontbuffer plane tracking bits 8292 * 8293 * This function gets called every time rendering on the given planes has 8294 * completed or flip on a crtc is completed. So DRRS should be upclocked 8295 * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again 8296 * if no other planes are dirty. 8297 * 8298 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
8299 */ 8300 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 8301 unsigned int frontbuffer_bits) 8302 { 8303 struct intel_dp *intel_dp; 8304 struct drm_crtc *crtc; 8305 enum pipe pipe; 8306 8307 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 8308 return; 8309 8310 cancel_delayed_work(&dev_priv->drrs.work); 8311 8312 mutex_lock(&dev_priv->drrs.mutex); 8313 8314 intel_dp = dev_priv->drrs.dp; 8315 if (!intel_dp) { 8316 mutex_unlock(&dev_priv->drrs.mutex); 8317 return; 8318 } 8319 8320 crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 8321 pipe = to_intel_crtc(crtc)->pipe; 8322 8323 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 8324 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 8325 8326 /* flush means busy screen hence upclock */ 8327 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 8328 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 8329 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); 8330 8331 /* 8332 * flush also means no more activity hence schedule downclock, if all 8333 * other fbs are quiescent too 8334 */ 8335 if (!dev_priv->drrs.busy_frontbuffer_bits) 8336 schedule_delayed_work(&dev_priv->drrs.work, 8337 msecs_to_jiffies(1000)); 8338 mutex_unlock(&dev_priv->drrs.mutex); 8339 } 8340 8341 /** 8342 * DOC: Display Refresh Rate Switching (DRRS) 8343 * 8344 * Display Refresh Rate Switching (DRRS) is a power conservation feature 8345 * which enables switching between low and high refresh rates dynamically, 8346 * based on the usage scenario. This feature is applicable 8347 * for internal panels. 8348 * 8349 * Indication that the panel supports DRRS is given by the panel EDID, which 8350 * would list multiple refresh rates for one resolution. 8351 * 8352 * DRRS is of 2 types - static and seamless. 8353 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 8354 * (may appear as a blink on screen) and is used in dock-undock scenarios. 8355 * Seamless DRRS involves changing RR without any visual effect to the user 8356 * and can be used during normal system usage. This is done by programming 8357 * certain registers. 8358 * 8359 * Support for static/seamless DRRS may be indicated in the VBT based on 8360 * inputs from the panel spec. 8361 * 8362 * DRRS saves power by switching to low RR based on usage scenarios. 8363 * 8364 * The implementation is based on frontbuffer tracking. When 8365 * there is a disturbance on the screen triggered by user activity or a periodic 8366 * system activity, DRRS is disabled (RR is changed to high RR). When there is 8367 * no movement on screen, after a timeout of 1 second, a switch to low RR is 8368 * made. 8369 * 8370 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 8371 * and intel_edp_drrs_flush() are called. 8372 * 8373 * DRRS can be further extended to support other internal panels and also 8374 * the scenario of video playback wherein RR is set based on the rate 8375 * requested by userspace. 8376 */ 8377 8378 /** 8379 * intel_dp_drrs_init - Init basic DRRS work and mutex. 8380 * @connector: eDP connector 8381 * @fixed_mode: preferred mode of panel 8382 * 8383 * This function is called only once at driver load to initialize basic 8384 * DRRS stuff. 8385 * 8386 * Returns: 8387 * Downclock mode if panel supports it, else return NULL. 8388 * DRRS support is determined by the presence of downclock mode (apart 8389 * from VBT setting).
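As the DOC section notes, the invalidate/flush pair is driven by the frontbuffer tracking code. A simplified, hedged sketch of that integration (the helper is hypothetical and compresses what are really two separate tracking callbacks):

	/* Simplified sketch of how frontbuffer tracking drives DRRS:
	 * upclock as soon as rendering dirties a plane, and let the flush
	 * side re-arm the 1 second idleness downclock timer. */
	static void frontbuffer_drrs_sketch(struct drm_i915_private *i915,
					    unsigned int frontbuffer_bits)
	{
		/* rendering started: force the high refresh rate */
		intel_edp_drrs_invalidate(i915, frontbuffer_bits);

		/* ... GPU work happens here ... */

		/* rendering done: allow the downclock work to kick in again */
		intel_edp_drrs_flush(i915, frontbuffer_bits);
	}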
8390 */ 8391 static struct drm_display_mode * 8392 intel_dp_drrs_init(struct intel_connector *connector, 8393 struct drm_display_mode *fixed_mode) 8394 { 8395 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 8396 struct drm_display_mode *downclock_mode = NULL; 8397 8398 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 8399 mutex_init(&dev_priv->drrs.mutex); 8400 8401 if (INTEL_GEN(dev_priv) <= 6) { 8402 drm_dbg_kms(&dev_priv->drm, 8403 "DRRS supported for Gen7 and above\n"); 8404 return NULL; 8405 } 8406 8407 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 8408 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); 8409 return NULL; 8410 } 8411 8412 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); 8413 if (!downclock_mode) { 8414 drm_dbg_kms(&dev_priv->drm, 8415 "Downclock mode is not found. DRRS not supported\n"); 8416 return NULL; 8417 } 8418 8419 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 8420 8421 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 8422 drm_dbg_kms(&dev_priv->drm, 8423 "seamless DRRS supported for eDP panel.\n"); 8424 return downclock_mode; 8425 } 8426 8427 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 8428 struct intel_connector *intel_connector) 8429 { 8430 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8431 struct drm_device *dev = &dev_priv->drm; 8432 struct drm_connector *connector = &intel_connector->base; 8433 struct drm_display_mode *fixed_mode = NULL; 8434 struct drm_display_mode *downclock_mode = NULL; 8435 bool has_dpcd; 8436 enum pipe pipe = INVALID_PIPE; 8437 intel_wakeref_t wakeref; 8438 struct edid *edid; 8439 8440 if (!intel_dp_is_edp(intel_dp)) 8441 return true; 8442 8443 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); 8444 8445 /* 8446 * On IBX/CPT we may get here with LVDS already registered. Since the 8447 * driver uses the only internal power sequencer available for both 8448 * eDP and LVDS bail out early in this case to prevent interfering 8449 * with an already powered-on LVDS power sequencer. 8450 */ 8451 if (intel_get_lvds_encoder(dev_priv)) { 8452 drm_WARN_ON(dev, 8453 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 8454 drm_info(&dev_priv->drm, 8455 "LVDS was detected, not registering eDP\n"); 8456 8457 return false; 8458 } 8459 8460 with_pps_lock(intel_dp, wakeref) { 8461 intel_dp_init_panel_power_timestamps(intel_dp); 8462 intel_dp_pps_init(intel_dp); 8463 intel_edp_panel_vdd_sanitize(intel_dp); 8464 } 8465 8466 /* Cache DPCD and EDID for edp. 
*/ 8467 has_dpcd = intel_edp_init_dpcd(intel_dp); 8468 8469 if (!has_dpcd) { 8470 /* if this fails, presume the device is a ghost */ 8471 drm_info(&dev_priv->drm, 8472 "failed to retrieve link info, disabling eDP\n"); 8473 goto out_vdd_off; 8474 } 8475 8476 mutex_lock(&dev->mode_config.mutex); 8477 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 8478 if (edid) { 8479 if (drm_add_edid_modes(connector, edid)) { 8480 drm_connector_update_edid_property(connector, edid); 8481 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 8482 } else { 8483 kfree(edid); 8484 edid = ERR_PTR(-EINVAL); 8485 } 8486 } else { 8487 edid = ERR_PTR(-ENOENT); 8488 } 8489 intel_connector->edid = edid; 8490 8491 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 8492 if (fixed_mode) 8493 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 8494 8495 /* fallback to VBT if available for eDP */ 8496 if (!fixed_mode) 8497 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 8498 mutex_unlock(&dev->mode_config.mutex); 8499 8500 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 8501 /* 8502 * Figure out the current pipe for the initial backlight setup. 8503 * If the current pipe isn't valid, try the PPS pipe, and if that 8504 * fails just assume pipe A. 8505 */ 8506 pipe = vlv_active_pipe(intel_dp); 8507 8508 if (pipe != PIPE_A && pipe != PIPE_B) 8509 pipe = intel_dp->pps_pipe; 8510 8511 if (pipe != PIPE_A && pipe != PIPE_B) 8512 pipe = PIPE_A; 8513 8514 drm_dbg_kms(&dev_priv->drm, 8515 "using pipe %c for initial backlight setup\n", 8516 pipe_name(pipe)); 8517 } 8518 8519 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 8520 intel_connector->panel.backlight.power = intel_edp_backlight_power; 8521 intel_panel_setup_backlight(connector, pipe); 8522 8523 if (fixed_mode) { 8524 drm_connector_set_panel_orientation_with_quirk(connector, 8525 dev_priv->vbt.orientation, 8526 fixed_mode->hdisplay, fixed_mode->vdisplay); 8527 } 8528 8529 return true; 8530 8531 out_vdd_off: 8532 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 8533 /* 8534 * vdd might still be enabled due to the delayed vdd off. 8535 * Make sure vdd is actually turned off here. 8536 */ 8537 with_pps_lock(intel_dp, wakeref) 8538 edp_panel_vdd_off_sync(intel_dp); 8539 8540 return false; 8541 } 8542 8543 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 8544 { 8545 struct intel_connector *intel_connector; 8546 struct drm_connector *connector; 8547 8548 intel_connector = container_of(work, typeof(*intel_connector), 8549 modeset_retry_work); 8550 connector = &intel_connector->base; 8551 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 8552 connector->name); 8553 8554 /* Grab the locks before changing connector property */ 8555 mutex_lock(&connector->dev->mode_config.mutex); 8556 /* Set connector link status to BAD and send a Uevent to notify 8557 * userspace to do a modeset.
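The retry work above implements the generic DRM link-status fallback contract: the kernel marks the link BAD and sends a hotplug uevent, and userspace is expected to reprobe and commit a fresh mode. A hedged userspace-side sketch with libdrm (error handling trimmed; the function name is hypothetical):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>
	#include <xf86drmMode.h>

	/* Returns true if the connector's "link-status" property reads BAD,
	 * in which case a compositor should re-commit a (possibly lower) mode. */
	static bool link_status_is_bad(int fd, uint32_t connector_id)
	{
		drmModeObjectPropertiesPtr props;
		bool bad = false;
		uint32_t i;

		props = drmModeObjectGetProperties(fd, connector_id,
						   DRM_MODE_OBJECT_CONNECTOR);
		if (!props)
			return false;

		for (i = 0; i < props->count_props; i++) {
			drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

			if (prop && !strcmp(prop->name, "link-status"))
				bad = (props->prop_values[i] == DRM_MODE_LINK_STATUS_BAD);
			drmModeFreeProperty(prop);
		}
		drmModeFreeObjectProperties(props);
		return bad;
	}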
8558 */ 8559 drm_connector_set_link_status_property(connector, 8560 DRM_MODE_LINK_STATUS_BAD); 8561 mutex_unlock(&connector->dev->mode_config.mutex); 8562 /* Send Hotplug uevent so userspace can reprobe */ 8563 drm_kms_helper_hotplug_event(connector->dev); 8564 } 8565 8566 bool 8567 intel_dp_init_connector(struct intel_digital_port *dig_port, 8568 struct intel_connector *intel_connector) 8569 { 8570 struct drm_connector *connector = &intel_connector->base; 8571 struct intel_dp *intel_dp = &dig_port->dp; 8572 struct intel_encoder *intel_encoder = &dig_port->base; 8573 struct drm_device *dev = intel_encoder->base.dev; 8574 struct drm_i915_private *dev_priv = to_i915(dev); 8575 enum port port = intel_encoder->port; 8576 enum phy phy = intel_port_to_phy(dev_priv, port); 8577 int type; 8578 8579 /* Initialize the work for modeset in case of link train failure */ 8580 INIT_WORK(&intel_connector->modeset_retry_work, 8581 intel_dp_modeset_retry_work_fn); 8582 8583 if (drm_WARN(dev, dig_port->max_lanes < 1, 8584 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 8585 dig_port->max_lanes, intel_encoder->base.base.id, 8586 intel_encoder->base.name)) 8587 return false; 8588 8589 intel_dp_set_source_rates(intel_dp); 8590 8591 intel_dp->reset_link_params = true; 8592 intel_dp->pps_pipe = INVALID_PIPE; 8593 intel_dp->active_pipe = INVALID_PIPE; 8594 8595 /* Preserve the current hw state. */ 8596 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 8597 intel_dp->attached_connector = intel_connector; 8598 8599 if (intel_dp_is_port_edp(dev_priv, port)) { 8600 /* 8601 * Currently we don't support eDP on TypeC ports, although in 8602 * theory it could work on TypeC legacy ports. 8603 */ 8604 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 8605 type = DRM_MODE_CONNECTOR_eDP; 8606 } else { 8607 type = DRM_MODE_CONNECTOR_DisplayPort; 8608 } 8609 8610 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 8611 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 8612 8613 /* 8614 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 8615 * for DP the encoder type can be set by the caller to 8616 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 8617 */ 8618 if (type == DRM_MODE_CONNECTOR_eDP) 8619 intel_encoder->type = INTEL_OUTPUT_EDP; 8620 8621 /* eDP only on port B and/or C on vlv/chv */ 8622 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 8623 IS_CHERRYVIEW(dev_priv)) && 8624 intel_dp_is_edp(intel_dp) && 8625 port != PORT_B && port != PORT_C)) 8626 return false; 8627 8628 drm_dbg_kms(&dev_priv->drm, 8629 "Adding %s connector on [ENCODER:%d:%s]\n", 8630 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 8631 intel_encoder->base.base.id, intel_encoder->base.name); 8632 8633 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 8634 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 8635 8636 if (!HAS_GMCH(dev_priv)) 8637 connector->interlace_allowed = true; 8638 connector->doublescan_allowed = 0; 8639 8640 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 8641 8642 intel_dp_aux_init(intel_dp); 8643 8644 intel_connector_attach_encoder(intel_connector, intel_encoder); 8645 8646 if (HAS_DDI(dev_priv)) 8647 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 8648 else 8649 intel_connector->get_hw_state = intel_connector_get_hw_state; 8650 8651 /* init MST on ports that can support it */ 8652 intel_dp_mst_encoder_init(dig_port, 8653 intel_connector->base.base.id); 8654 8655 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 8656 intel_dp_aux_fini(intel_dp); 8657 intel_dp_mst_encoder_cleanup(dig_port); 8658 goto fail; 8659 } 8660 8661 intel_dp_add_properties(intel_dp, connector); 8662 8663 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 8664 int ret = intel_dp_init_hdcp(dig_port, intel_connector); 8665 if (ret) 8666 drm_dbg_kms(&dev_priv->drm, 8667 "HDCP init failed, skipping.\n"); 8668 } 8669 8670 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 8671 * 0xd. Failure to do so will result in spurious interrupts being 8672 * generated on the port when a cable is not attached. 8673 */ 8674 if (IS_G45(dev_priv)) { 8675 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 8676 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 8677 (temp & ~0xf) | 0xd); 8678 } 8679 8680 intel_dp->frl.is_trained = false; 8681 intel_dp->frl.trained_rate_gbps = 0; 8682 8683 return true; 8684 8685 fail: 8686 drm_connector_cleanup(connector); 8687 8688 return false; 8689 } 8690 8691 bool intel_dp_init(struct drm_i915_private *dev_priv, 8692 i915_reg_t output_reg, 8693 enum port port) 8694 { 8695 struct intel_digital_port *dig_port; 8696 struct intel_encoder *intel_encoder; 8697 struct drm_encoder *encoder; 8698 struct intel_connector *intel_connector; 8699 8700 dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); 8701 if (!dig_port) 8702 return false; 8703 8704 intel_connector = intel_connector_alloc(); 8705 if (!intel_connector) 8706 goto err_connector_alloc; 8707 8708 intel_encoder = &dig_port->base; 8709 encoder = &intel_encoder->base; 8710 8711 mutex_init(&dig_port->hdcp_mutex); 8712 8713 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 8714 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 8715 "DP %c", port_name(port))) 8716 goto err_encoder_init; 8717 8718 intel_encoder->hotplug = intel_dp_hotplug; 8719 intel_encoder->compute_config = intel_dp_compute_config; 8720 intel_encoder->get_hw_state = intel_dp_get_hw_state; 8721 intel_encoder->get_config = intel_dp_get_config; 8722 intel_encoder->sync_state = intel_dp_sync_state; 8723 intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check; 8724 intel_encoder->update_pipe = intel_panel_update_backlight; 8725 intel_encoder->suspend = intel_dp_encoder_suspend; 8726 intel_encoder->shutdown = intel_dp_encoder_shutdown; 8727 if (IS_CHERRYVIEW(dev_priv)) { 8728 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 8729 intel_encoder->pre_enable = chv_pre_enable_dp; 8730 intel_encoder->enable = vlv_enable_dp; 8731 intel_encoder->disable = vlv_disable_dp; 8732 intel_encoder->post_disable = chv_post_disable_dp; 8733 intel_encoder->post_pll_disable = 
chv_dp_post_pll_disable; 8734 } else if (IS_VALLEYVIEW(dev_priv)) { 8735 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 8736 intel_encoder->pre_enable = vlv_pre_enable_dp; 8737 intel_encoder->enable = vlv_enable_dp; 8738 intel_encoder->disable = vlv_disable_dp; 8739 intel_encoder->post_disable = vlv_post_disable_dp; 8740 } else { 8741 intel_encoder->pre_enable = g4x_pre_enable_dp; 8742 intel_encoder->enable = g4x_enable_dp; 8743 intel_encoder->disable = g4x_disable_dp; 8744 intel_encoder->post_disable = g4x_post_disable_dp; 8745 } 8746 8747 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 8748 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) 8749 dig_port->dp.set_link_train = cpt_set_link_train; 8750 else 8751 dig_port->dp.set_link_train = g4x_set_link_train; 8752 8753 if (IS_CHERRYVIEW(dev_priv)) 8754 dig_port->dp.set_signal_levels = chv_set_signal_levels; 8755 else if (IS_VALLEYVIEW(dev_priv)) 8756 dig_port->dp.set_signal_levels = vlv_set_signal_levels; 8757 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 8758 dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels; 8759 else if (IS_GEN(dev_priv, 6) && port == PORT_A) 8760 dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; 8761 else 8762 dig_port->dp.set_signal_levels = g4x_set_signal_levels; 8763 8764 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || 8765 (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) { 8766 dig_port->dp.preemph_max = intel_dp_preemph_max_3; 8767 dig_port->dp.voltage_max = intel_dp_voltage_max_3; 8768 } else { 8769 dig_port->dp.preemph_max = intel_dp_preemph_max_2; 8770 dig_port->dp.voltage_max = intel_dp_voltage_max_2; 8771 } 8772 8773 dig_port->dp.output_reg = output_reg; 8774 dig_port->max_lanes = 4; 8775 8776 intel_encoder->type = INTEL_OUTPUT_DP; 8777 intel_encoder->power_domain = intel_port_to_power_domain(port); 8778 if (IS_CHERRYVIEW(dev_priv)) { 8779 if (port == PORT_D) 8780 intel_encoder->pipe_mask = BIT(PIPE_C); 8781 else 8782 intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); 8783 } else { 8784 intel_encoder->pipe_mask = ~0; 8785 } 8786 intel_encoder->cloneable = 0; 8787 intel_encoder->port = port; 8788 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 8789 8790 dig_port->hpd_pulse = intel_dp_hpd_pulse; 8791 8792 if (HAS_GMCH(dev_priv)) { 8793 if (IS_GM45(dev_priv)) 8794 dig_port->connected = gm45_digital_port_connected; 8795 else 8796 dig_port->connected = g4x_digital_port_connected; 8797 } else { 8798 if (port == PORT_A) 8799 dig_port->connected = ilk_digital_port_connected; 8800 else 8801 dig_port->connected = ibx_digital_port_connected; 8802 } 8803 8804 if (port != PORT_A) 8805 intel_infoframe_init(dig_port); 8806 8807 dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 8808 if (!intel_dp_init_connector(dig_port, intel_connector)) 8809 goto err_init_connector; 8810 8811 return true; 8812 8813 err_init_connector: 8814 drm_encoder_cleanup(encoder); 8815 err_encoder_init: 8816 kfree(intel_connector); 8817 err_connector_alloc: 8818 kfree(dig_port); 8819 return false; 8820 } 8821 8822 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 8823 { 8824 struct intel_encoder *encoder; 8825 8826 for_each_intel_encoder(&dev_priv->drm, encoder) { 8827 struct intel_dp *intel_dp; 8828 8829 if (encoder->type != INTEL_OUTPUT_DDI) 8830 continue; 8831 8832 intel_dp = enc_to_intel_dp(encoder); 8833 8834 if (!intel_dp->can_mst) 8835 continue; 8836 8837 if (intel_dp->is_mst) 8838 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 8839 } 8840 } 8841 8842 void 
intel_dp_mst_resume(struct drm_i915_private *dev_priv) 8843 { 8844 struct intel_encoder *encoder; 8845 8846 for_each_intel_encoder(&dev_priv->drm, encoder) { 8847 struct intel_dp *intel_dp; 8848 int ret; 8849 8850 if (encoder->type != INTEL_OUTPUT_DDI) 8851 continue; 8852 8853 intel_dp = enc_to_intel_dp(encoder); 8854 8855 if (!intel_dp->can_mst) 8856 continue; 8857 8858 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 8859 true); 8860 if (ret) { 8861 intel_dp->is_mst = false; 8862 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 8863 false); 8864 } 8865 } 8866 } 8867
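These MST helpers are intended to bracket the driver's system suspend/resume sequence: quiesce the topology managers before suspend, then re-sync (or tear down) each topology on resume, as the is_mst fallback above shows. An illustrative sketch of the pairing (the pm entry point names are hypothetical, not the actual i915 callbacks):

	/* Illustrative pm hooks showing where the MST helpers slot in. */
	static int example_display_suspend(struct drm_i915_private *i915)
	{
		intel_dp_mst_suspend(i915); /* quiesce MST topology managers */
		/* ... rest of display suspend ... */
		return 0;
	}

	static int example_display_resume(struct drm_i915_private *i915)
	{
		/* ... rest of display resume ... */
		intel_dp_mst_resume(i915); /* re-sync or drop stale topologies */
		return 0;
	}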