1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpll.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

/*
 * Fixed DPLL divider settings for a given DP link clock (kHz); matched
 * against pipe_config->port_clock in intel_dp_set_clock().
 */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* First (lowest-rate) DPLL table entry for VLV/CHV, used as a default. */
const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
{
	return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
}

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	/* Standard DP link rates, ascending (kHz). */
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/* Sink's max rate, further capped by any LTTPRs on the link. */
	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate.
 * rates[] is expected to be sorted ascending; returns the count of
 * leading entries that are <= max_rate.
 */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

/* Current max lane count, possibly reduced by link training fallback. */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* Big joiner: gen12+, or gen11 on any port except A. */
	return INTEL_GEN(dev_priv) >= 12 ||
		(INTEL_GEN(dev_priv) == 11 &&
		 encoder->port != PORT_A);
}

/* Max source link rate (kHz) for CNL, depending on SKU voltage and port. */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

/* Max source link rate (kHz) for ICL: combo PHY non-eDP is capped at 5.4G. */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

/* Max source link rate (kHz) for JSL/EHL: eDP is capped at 5.4G. */
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* VBT may further limit the platform max rate. */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

/* Merge-intersect two ascending rate arrays into common_rates[];
 * returns the number of common entries.
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	/* Both inputs are sorted ascending, so a single linear walk suffices. */
	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

/* Recompute the source/sink common rates; falls back to RBR (162000) only. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

/* Can the panel's fixed mode still fit at 6bpc with these link params? */
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	/* 18 = 6 bpc * 3 components, the minimum output bpp. */
	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	/* Try the next lower link rate first, then halve the lane count. */
	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

/* Scale a mode clock by the FEC overhead factor (~2.85%). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

/* Small joiner RAM size in bits; larger on gen11+. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

/* Compute the max compressed output bpp (in U6.4 fixed point) allowed by
 * link bandwidth and joiner RAM; returns 0 if below the minimum valid bpp.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;

	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

/* Pick the smallest valid DSC slice count that satisfies encoder
 * throughput, sink max slice width and (optionally) big joiner rules;
 * returns 0 if no valid count exists.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Big joiner doubles the required slice count per pipe pair. */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

/* Choose the output format for a mode, preferring RGB when the sink (or a
 * DFP conversion) allows it; falls back to YCbCr 4:4:4 or 4:2:0.
 */
static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.rgb_to_ycbcr &&
	    intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

/* Minimum pipe bpp: 6 bpc for RGB, 8 bpc otherwise (3 components each). */
int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
	 * format of the number of bytes per pixel will be half the number
	 * of bytes of RGB pixel.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Minimum output bpp for a mode, accounting for the chosen output format. */
static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

/* Validate a mode against downstream facing port (DFP) limits: PCON FRL
 * bandwidth, max dotclock, and TMDS clock range.
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/*
 Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	/* eDP panels only accept their fixed mode's dimensions. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay != fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay != fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Try big joiner (two pipes) when one pipe can't carry the mode. */
	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}

/* Source supports HBR2 (5.4 GHz) iff its highest rate reaches 540000 kHz. */
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

/* Source supports HBR3 (8.1 GHz) iff its highest rate reaches 810000 kHz. */
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

/* Program fixed DPLL dividers for the port clock on pre-DDI platforms. */
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private
 *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	/* Copy the table entry matching the requested port clock, if any. */
	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

/* Format array as "a, b, c" into str; stops silently on truncation. */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

/* Highest common rate not above the (possibly fallback-reduced) max. */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

/* Index of rate in the sink rate table, for the eDP 1.4 rate-select method. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

/* Translate a port clock into either a link_bw code or a rate_select index
 * (exactly one of the two outputs is non-zero).
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method.
 */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	/* On gen11 FEC is not supported on transcoder A. */
	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

/* FEC needs both source (platform/pipe) and sink support. */
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	/* On non-eDP DP, DSC requires FEC to be enabled. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

/* True when the DFP ends up carrying YCbCr 4:2:0 on its HDMI/TMDS side. */
static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

/* Effective TMDS clock for the DFP at the given bpc. */
static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{

	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}

/* Max pipe bpp, clamped by DFP bpc/TMDS limits and (for eDP) the VBT. */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	/* Step deep color down (12 -> 10 bpc) until the TMDS clock fits. */
	if (intel_dp->dfp.min_tmds_clock) {
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests.
 */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin both limits to the requested bpp; disable dithering at 6 bpc */
		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock =
limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				/* First (clock, lane_count) combo that fits the mode wins */
				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	/* No combination within the limits could carry the mode */
	return -EINVAL;
}

/* Optimize link config in order: max bpp, min lanes, min clock */
static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		/* Note: lane loop outside the clock loop, unlike the "wide" variant */
		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

/*
 * Pick the highest sink-supported DSC input bpc not exceeding
 * dsc_max_bpc and return the corresponding pipe bpp (bpc * 3),
 * or 0 if none fits.
 */
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i
= 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

#define DSC_SUPPORTED_VERSION_MIN	1

/*
 * Fill out the DSC config (vdsc_cfg) from the sink's DSC DPCD caps and
 * the source's constraints, then derive the rate-control parameters.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* DSC version: sink's major as-is, minor capped at what we support */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* DSC 1.2 encodes the max line buffer depth with a special value */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}

/*
 * Compute a DSC-compressed link configuration: pick input bpp from the
 * sink's DSC caps, use max link rate/lane count, and determine the
 * compressed bpp and slice count. Returns -EINVAL when DSC cannot be
 * used for this config.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is required for DSC on non-eDP DP (see intel_dp_supports_dsc()) */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: compressed bpp comes straight from the sink's DPCD (6.4 fixed point) */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}

/*
 * Compute lane count, link rate and bpp for the mode, within the limits
 * derived from source and sink caps; falls back to DSC when the mode
 * does not fit the available bandwidth uncompressed.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
	     adjusted_mode->crtc_hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp))
		pipe_config->bigjoiner = true;

	if (intel_dp_is_edp(intel_dp))
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we fall back to the max clock and lane count for eDP
		 * panels that fail with the fast optimal settings (see
		 * intel_dp->use_max_params), in which case the fast vs. wide
		 * choice doesn't matter.
		 */
		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
	else
		/* Optimize for slow and wide.
 */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}

/*
 * Decide whether the RGB output should use limited (16-235) quantization
 * range; never true for YCbCr output formats.
 */
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

/* Audio is unavailable on G4X and, before gen12, on port A. */
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

/* Fill a VSC SDP with pixel format / colorimetry for this state. */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

/* Enable a colorimetry VSC SDP for this state when one is needed. */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;

	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;
	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
					 &crtc_state->infoframes.vsc);
}

/* Build the VSC SDP used while PSR/PSR2 is active. */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	vsc->sdp_type = DP_SDP_VSC;

	if (intel_dp->psr.psr2_enabled) {
		if (intel_dp->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

/* Enable the HDR metadata infoframe SDP when the connector provides metadata. */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/*
 * Enable DRRS (seamless downclocking) and compute its M/N values when
 * the panel has a downclock mode and neither VRR nor PSR is in use.
 */
static void
intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
			     struct intel_crtc_state *pipe_config,
			     int output_bpp, bool constant_n)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pixel_clock;

	if (pipe_config->vrr.enable)
		return;

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_dp_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return;

	if (!intel_connector->panel.downclock_mode ||
	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	pipe_config->has_drrs = true;

	pixel_clock = intel_connector->panel.downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
			       constant_n, pipe_config->fec_enable);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
}

/* Compute the full crtc state for a DP encoder during atomic check. */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Scale the horizontal timings down to a single MSO segment */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_vrr_compute_config(pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, pipe_config);
	/* must run after intel_psr_compute_config() (see comment in the helper) */
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}

/* Record the link parameters for the upcoming link training. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	/* link_trained is cleared; training will set it again on success */
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

/* Program the DP port register (intel_dp->DP) prior to enabling the port. */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp,
				 pipe_config->port_clock,
				 pipe_config->lane_count);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost
everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT, enhanced framing lives in TRANS_DP_CTL instead */
		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}


/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* No-op on non-eDP outputs */
	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_panel_enable_backlight(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* Reverse order of intel_edp_backlight_on(): PP control first, then PWM */
	intel_pps_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}

/* Assert that the DP port enable bit matches the expected state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

/* Assert that the eDP PLL enable bit in DP_A matches the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)

/* Enable the ILK eDP PLL for the configured port clock (162 or 270 MHz). */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* PLL may only be enabled with the pipe/port down and the PLL off */
	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2.
 Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}

/* Disable the ILK-era port A eDP PLL; pipe and port must already be off. */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}

/*
 * Does the sink need to stay in D0 for us to get downstream
 * hotplug notifications?
 */
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/* Enable/disable DSC decompression in the sink, if this state uses DSC. */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enable ? "enable" : "disable");
}

/* Program our source OUI into the sink so it can enable vendor features. */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = { 0 };

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		/* buf stays zeroed on a failed read, so we fall through to the write */
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 if we rely on it for downstream hpd */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

/*
 * Find the pipe currently driving CPT/PPT DP @port by scanning the
 * per-transcoder TRANS_DP_CTL port select fields.
 */
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
				 enum port port, enum pipe *pipe)
{
	enum pipe p;

	for_each_pipe(dev_priv, p) {
		u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));

		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
			*pipe = p;
			return true;
		}
	}

	drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
		    port_name(port));

	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
}

/*
 * Read out whether the DP port behind @dp_reg is enabled, and which
 * pipe feeds it (the pipe select encoding varies per platform).
 */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = intel_de_read(dev_priv, dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}

/* Encoder hw state readout; takes a display power reference while reading. */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);

	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);

	return ret;
}

/* Read the current DP/eDP configuration from the hardware into @pipe_config. */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarity lives in the transcoder DP control register */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK)
 >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link clock comes from the eDP PLL frequency select */
	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		intel_dp_get_dpcd(intel_dp);

	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}

/*
 * Return false (forcing a full modeset) when the BIOS-programmed state
 * cannot be trusted for a fastset; true when a fastset is acceptable.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	if (CAN_PSR(intel_dp)) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}

/* Common DP disable: audio off, backlight off, sink to D3, panel power off. */
static void intel_disable_dp(struct intel_atomic_state *state,
			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel.
	 */
	intel_pps_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
	intel_pps_off(intel_dp);
	/* Any PCON FRL training is lost once the link goes down */
	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;
}

static void g4x_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

static void vlv_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

static void g4x_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}

static void vlv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}

static void chv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}

/* Program the CPT/PPT flavour of the link training pattern bits. */
static void
cpt_set_link_train(struct intel_dp *intel_dp,
		   const struct intel_crtc_state *crtc_state,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK_CPT;

	switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF_CPT;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1_CPT;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	case DP_TRAINING_PATTERN_3:
		/* CPT hardware has no TPS3 encoding; TPS2 is the closest match */
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* Cache the PCON's DSC encoder capability DPCD registers. */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct
drm_i915_private *i915 = dp_to_i915(intel_dp); 2522 2523 /* Clear the cached register set to avoid using stale values */ 2524 2525 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); 2526 2527 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, 2528 intel_dp->pcon_dsc_dpcd, 2529 sizeof(intel_dp->pcon_dsc_dpcd)) < 0) 2530 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", 2531 DP_PCON_DSC_ENCODER); 2532 2533 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", 2534 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); 2535 } 2536 2537 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) 2538 { 2539 int bw_gbps[] = {9, 18, 24, 32, 40, 48}; 2540 int i; 2541 2542 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { 2543 if (frl_bw_mask & (1 << i)) 2544 return bw_gbps[i]; 2545 } 2546 return 0; 2547 } 2548 2549 static int intel_dp_pcon_set_frl_mask(int max_frl) 2550 { 2551 switch (max_frl) { 2552 case 48: 2553 return DP_PCON_FRL_BW_MASK_48GBPS; 2554 case 40: 2555 return DP_PCON_FRL_BW_MASK_40GBPS; 2556 case 32: 2557 return DP_PCON_FRL_BW_MASK_32GBPS; 2558 case 24: 2559 return DP_PCON_FRL_BW_MASK_24GBPS; 2560 case 18: 2561 return DP_PCON_FRL_BW_MASK_18GBPS; 2562 case 9: 2563 return DP_PCON_FRL_BW_MASK_9GBPS; 2564 } 2565 2566 return 0; 2567 } 2568 2569 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 2570 { 2571 struct intel_connector *intel_connector = intel_dp->attached_connector; 2572 struct drm_connector *connector = &intel_connector->base; 2573 int max_frl_rate; 2574 int max_lanes, rate_per_lane; 2575 int max_dsc_lanes, dsc_rate_per_lane; 2576 2577 max_lanes = connector->display_info.hdmi.max_lanes; 2578 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; 2579 max_frl_rate = max_lanes * rate_per_lane; 2580 2581 if (connector->display_info.hdmi.dsc_cap.v_1p2) { 2582 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; 2583 dsc_rate_per_lane = 
 connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
		if (max_dsc_lanes && dsc_rate_per_lane)
			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
	}

	return max_frl_rate;
}

/*
 * Attempt FRL (Fixed Rate Link) training of the PCON's HDMI side.
 * Returns 0 on success, negative errno on failure/timeout.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define PCON_EXTENDED_TRAIN_MODE (1 > 0)
#define PCON_CONCURRENT_MODE (1 > 0)
#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
	if (ret < 0)
		return ret;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	/* Train at the lower of what the PCON and the sink can do */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Verify HDMI Link configuration shows FRL Mode */
	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
	    DP_PCON_HDMI_MODE_FRL) {
		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
		return -EINVAL;
	}
	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);

	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/* An HDMI 2.1 sink: a branch (PCON) device with a non-zero FRL capability. */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp->has_hdmi_sink &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

/* Train FRL if the sink supports it; fall back to TMDS mode on failure. */
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* Always go for FRL training if supported */
	if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}

/* DSC slice height for the PCON encoder, derived from the mode's vactive. */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct
 intel_crtc_state *crtc_state)
{
	int vactive = crtc_state->hw.adjusted_mode.vdisplay;

	return intel_hdmi_dsc_get_slice_height(vactive);
}

/* DSC slice count satisfying both the PCON's and the HDMI sink's limits. */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
					     pcon_max_slice_width,
					     hdmi_max_slices, hdmi_throughput);
}

/* DSC bits-per-pixel for the PCON encoder, within the sink's constraints. */
static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  int num_slices, int slice_width)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int output_format = crtc_state->output_format;
	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
	int hdmi_max_chunk_bytes =
		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
				      num_slices, output_format, hdmi_all_bpp,
				      hdmi_max_chunk_bytes);
}

/*
 * Program the PCON's DSC PPS override parameters (slice geometry and
 * bpp) when driving an HDMI 2.1 sink with a DSC 1.2 capable PCON.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON and the sink must support DSC 1.2 */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack the PPS override: LE16 slice height/width, 10-bit bpp */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}

/* Program the g4x flavour of the link training pattern bits. */
static void
g4x_set_link_train(struct intel_dp *intel_dp,
		   const struct intel_crtc_state *crtc_state,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK;

	switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	case DP_TRAINING_PATTERN_3:
		/* g4x hardware has no TPS3 encoding; TPS2 is the closest match */
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* Turn the DP port on with training pattern 1 selected. */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* Configure a DP branch (protocol converter) device's output modes. */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	/* The protocol converter control registers require DPCD 1.3+ */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
			    enableddisabled(intel_dp->has_hdmi_sink));

	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
			    enableddisabled(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;
	if (intel_dp->dfp.rgb_to_ycbcr) {
		bool bt2020, bt709;

		/*
		 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
		 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
		 *
		 */
		tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;

		bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								   intel_dp->downstream_ports,
								   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
		bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								  intel_dp->downstream_ports,
								  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
		/* Pick the conversion matching the state's colorimetry, if supported */
		switch (crtc_state->infoframes.vsc.colorimetry) {
		case DP_COLORIMETRY_BT2020_RGB:
		case DP_COLORIMETRY_BT2020_YCC:
			if (bt2020)
				tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
			break;
		case DP_COLORIMETRY_BT709_YCC:
		case DP_COLORIMETRY_XVYCC_709:
			if (bt709)
				tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
			break;
		default:
			break;
		}
	}

	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
			    enableddisabled(tmp ?
			   true : false));
}

/*
 * Bring up the DP port and train the link.
 *
 * The ordering is hardware-mandated: the port is enabled and panel power
 * sequenced under the pps lock, the PHY lanes are waited on (vlv/chv only),
 * and only then is the sink moved to D0, the protocol converter configured
 * and link training run. Audio is enabled last, once the link is stable.
 */
static void intel_enable_dp(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	/* The port must still be disabled when we get here. */
	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
		return;

	with_intel_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_pps_init(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		/* Panel power sequence: VDD first, then full panel power. */
		intel_pps_vdd_on_unlocked(intel_dp);
		intel_pps_on_unlocked(intel_dp);
		intel_pps_vdd_off_unlocked(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		/* chv: don't wait on lanes the current config leaves unused */
		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
	intel_dp_configure_protocol_converter(intel_dp, pipe_config);
	intel_dp_check_frl_training(intel_dp);
	intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
	intel_dp_start_link_train(intel_dp, pipe_config);
	intel_dp_stop_link_train(intel_dp, pipe_config);

	if (pipe_config->has_audio) {
		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
			pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}

/* g4x: bring up port + link, then the (e)DP backlight. */
static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}

/*
 * vlv: the link was already brought up from the pre_enable hook
 * (vlv_pre_enable_dp), so only the backlight remains here.
 */
static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}

static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}

/* vlv: program the PHY first, then enable the port and train the link. */
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}

static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now
	 */
	chv_phy_release_cl2_override(encoder);
}

static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}

/* Platforms whose PHY supports voltage swing up to level 2. */
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

/* Platforms whose PHY supports voltage swing up to level 3. */
static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}

/* Platforms whose PHY supports pre-emphasis up to level 2. */
static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

/* Platforms whose PHY supports pre-emphasis up to level 3. */
static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}

/*
 * Translate the vswing/pre-emphasis requested by link training
 * (intel_dp->train_set[0]) into vlv PHY register values. The hex
 * constants are platform tuning values; swing/pre-emphasis
 * combinations not allowed by the DP spec fall through to the
 * default cases and are silently ignored.
 */
static void vlv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, crtc_state,
				 demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}

static void chv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct
intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	/*
	 * Map the requested vswing/pre-emphasis onto chv PHY
	 * de-emphasis/margin values. Combinations not allowed by the
	 * DP spec fall through to the default cases and are ignored.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* max swing level needs the unique transition scale */
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, crtc_state,
				 deemph_reg_value, margin_reg_value,
				 uniq_trans_scale);
}

/* Translate the DPCD train_set byte into g4x DP port register bits. */
static u32 g4x_signal_levels(u8 train_set)
{
	u32 signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Write the g4x signal levels into the DP port register. */
static void
g4x_set_signal_levels(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = g4x_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* unsupported combo: fall back to the lowest setting */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Write the SNB CPU eDP signal levels into the DP port register. */
static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = snb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* unsupported combo: fall back to a safe default */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Write the IVB CPU eDP signal levels into the DP port register. */
static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = ivb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* Map a DP training pattern value to a single digit for debug logging. */
static char dp_training_pattern_name(u8 train_pat)
{
	switch (train_pat) {
	case DP_TRAINING_PATTERN_1:
	case DP_TRAINING_PATTERN_2:
	case DP_TRAINING_PATTERN_3:
		/* patterns 1-3 are the consecutive values 1, 2, 3 */
		return '0' + train_pat;
	case DP_TRAINING_PATTERN_4:
		return '4';
	default:
		MISSING_CASE(train_pat);
		return '?';
	}
}

/*
 * Program the given link training pattern into the source side.
 * Logs the pattern (except for DISABLE) and delegates the actual
 * register programming to the platform's set_link_train() vfunc.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       u8 dp_train_pat)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
			    encoder->base.base.id, encoder->base.name,
			    dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}

/*
 * Shut the DP link down: idle the link training pattern, then disable
 * the port. The register write ordering (including the posting reads)
 * and the IBX transcoder-A workaround below are hardware-mandated.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	/* Nothing to do if the port is already disabled. */
	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Give the panel time to power down before it may be re-enabled. */
	msleep(intel_dp->pps.panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_intel_pps_lock(intel_dp, wakeref)
			intel_dp->pps.active_pipe = INVALID_PIPE;
	}
}

/*
 * Query whether the sink supports extended colorimetry via VSC SDP,
 * per the DPRX feature enumeration list DPCD register.
 */
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

/* Cache the sink's DSC and FEC capabilities from DPCD. */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
3520 */ 3521 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 3522 3523 /* Clear fec_capable to avoid using stale values */ 3524 intel_dp->fec_capable = 0; 3525 3526 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 3527 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 3528 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 3529 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 3530 intel_dp->dsc_dpcd, 3531 sizeof(intel_dp->dsc_dpcd)) < 0) 3532 drm_err(&i915->drm, 3533 "Failed to read DPCD register 0x%x\n", 3534 DP_DSC_SUPPORT); 3535 3536 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 3537 (int)sizeof(intel_dp->dsc_dpcd), 3538 intel_dp->dsc_dpcd); 3539 3540 /* FEC is supported only on DP 1.4 */ 3541 if (!intel_dp_is_edp(intel_dp) && 3542 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 3543 &intel_dp->fec_capable) < 0) 3544 drm_err(&i915->drm, 3545 "Failed to read FEC DPCD register\n"); 3546 3547 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 3548 intel_dp->fec_capable); 3549 } 3550 } 3551 3552 static void intel_edp_mso_mode_fixup(struct intel_connector *connector, 3553 struct drm_display_mode *mode) 3554 { 3555 struct intel_dp *intel_dp = intel_attached_dp(connector); 3556 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3557 int n = intel_dp->mso_link_count; 3558 int overlap = intel_dp->mso_pixel_overlap; 3559 3560 if (!mode || !n) 3561 return; 3562 3563 mode->hdisplay = (mode->hdisplay - overlap) * n; 3564 mode->hsync_start = (mode->hsync_start - overlap) * n; 3565 mode->hsync_end = (mode->hsync_end - overlap) * n; 3566 mode->htotal = (mode->htotal - overlap) * n; 3567 mode->clock *= n; 3568 3569 drm_mode_set_name(mode); 3570 3571 drm_dbg_kms(&i915->drm, 3572 "[CONNECTOR:%d:%s] using generated MSO mode: ", 3573 connector->base.base.id, connector->base.name); 3574 drm_mode_debug_printmodeline(mode); 3575 } 3576 3577 static void intel_edp_mso_init(struct intel_dp *intel_dp) 3578 { 3579 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3580 u8 mso; 3581 
3582 if (intel_dp->edp_dpcd[0] < DP_EDP_14) 3583 return; 3584 3585 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { 3586 drm_err(&i915->drm, "Failed to read MSO cap\n"); 3587 return; 3588 } 3589 3590 /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ 3591 mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; 3592 if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { 3593 drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso); 3594 mso = 0; 3595 } 3596 3597 if (mso) { 3598 drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n", 3599 mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso); 3600 if (!HAS_MSO(i915)) { 3601 drm_err(&i915->drm, "No source MSO support, disabling\n"); 3602 mso = 0; 3603 } 3604 } 3605 3606 intel_dp->mso_link_count = mso; 3607 intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */ 3608 } 3609 3610 static bool 3611 intel_edp_init_dpcd(struct intel_dp *intel_dp) 3612 { 3613 struct drm_i915_private *dev_priv = 3614 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 3615 3616 /* this function is meant to be called only once */ 3617 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 3618 3619 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 3620 return false; 3621 3622 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 3623 drm_dp_is_branch(intel_dp->dpcd)); 3624 3625 /* 3626 * Read the eDP display control registers. 3627 * 3628 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 3629 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 3630 * set, but require eDP 1.4+ detection (e.g. for supported link rates 3631 * method). The display control registers should read zero if they're 3632 * not supported anyway. 
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	intel_edp_mso_init(intel_dp);

	return true;
}

/* Does this sink report a usable SINK_COUNT? */
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

/*
 * (Re-)read the sink's DPCD caps, rates, sink count and downstream port
 * info. Returns false if the sink looks absent or unusable.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	intel_dp_lttpr_init(intel_dp);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display.
 Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

/* MST is usable only if the modparam, the source port and the sink all allow it. */
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

/*
 * Decide whether to run this port in MST mode and update the topology
 * manager accordingly.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

/* Read the full ESI vector; true only if all bytes were transferred. */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Pack a drm_dp_vsc_sdp into the raw SDP wire format. Returns the
 * packed length, or -ENOSPC if the destination buffer is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

/*
 * Pack an HDR static metadata (DRM) infoframe into the DP SDP wire
 * format. Returns the number of bytes to write, or -ENOSPC on a
 * too-small buffer or unexpected infoframe size.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Pack via the HDMI helper first, then convert the header below. */
	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

/*
 * Pack and write one DP SDP of the given type (VSC or HDR gamut
 * metadata) from the crtc state, if that infoframe is enabled there.
 */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	/* Skip SDP types the state doesn't have enabled. */
	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

/* Pack and write a caller-supplied VSC SDP (used e.g. by the PSR code). */
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}

/*
 * Enable/disable the DP infoframes (DIPs) for the transcoder and, when
 * enabling, (re)write the VSC and HDR metadata SDP payloads.
 */
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg =
HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 4023 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 4024 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 4025 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 4026 u32 val = intel_de_read(dev_priv, reg); 4027 4028 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 4029 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 4030 if (intel_psr_enabled(intel_dp)) 4031 val &= ~dip_enable; 4032 else 4033 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 4034 4035 if (!enable) { 4036 intel_de_write(dev_priv, reg, val); 4037 intel_de_posting_read(dev_priv, reg); 4038 return; 4039 } 4040 4041 intel_de_write(dev_priv, reg, val); 4042 intel_de_posting_read(dev_priv, reg); 4043 4044 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 4045 if (!intel_psr_enabled(intel_dp)) 4046 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 4047 4048 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 4049 } 4050 4051 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 4052 const void *buffer, size_t size) 4053 { 4054 const struct dp_sdp *sdp = buffer; 4055 4056 if (size < sizeof(struct dp_sdp)) 4057 return -EINVAL; 4058 4059 memset(vsc, 0, size); 4060 4061 if (sdp->sdp_header.HB0 != 0) 4062 return -EINVAL; 4063 4064 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 4065 return -EINVAL; 4066 4067 vsc->sdp_type = sdp->sdp_header.HB1; 4068 vsc->revision = sdp->sdp_header.HB2; 4069 vsc->length = sdp->sdp_header.HB3; 4070 4071 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 4072 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 4073 /* 4074 * - HB2 = 0x2, HB3 = 0x8 4075 * VSC SDP supporting 3D stereo + PSR 4076 * - HB2 = 0x4, HB3 = 0xe 4077 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 4078 * first scan line of the SU region (applies to eDP v1.4b 4079 * and higher). 
4080 */ 4081 return 0; 4082 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 4083 /* 4084 * - HB2 = 0x5, HB3 = 0x13 4085 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 4086 * Format. 4087 */ 4088 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 4089 vsc->colorimetry = sdp->db[16] & 0xf; 4090 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 4091 4092 switch (sdp->db[17] & 0x7) { 4093 case 0x0: 4094 vsc->bpc = 6; 4095 break; 4096 case 0x1: 4097 vsc->bpc = 8; 4098 break; 4099 case 0x2: 4100 vsc->bpc = 10; 4101 break; 4102 case 0x3: 4103 vsc->bpc = 12; 4104 break; 4105 case 0x4: 4106 vsc->bpc = 16; 4107 break; 4108 default: 4109 MISSING_CASE(sdp->db[17] & 0x7); 4110 return -EINVAL; 4111 } 4112 4113 vsc->content_type = sdp->db[18] & 0x7; 4114 } else { 4115 return -EINVAL; 4116 } 4117 4118 return 0; 4119 } 4120 4121 static int 4122 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 4123 const void *buffer, size_t size) 4124 { 4125 int ret; 4126 4127 const struct dp_sdp *sdp = buffer; 4128 4129 if (size < sizeof(struct dp_sdp)) 4130 return -EINVAL; 4131 4132 if (sdp->sdp_header.HB0 != 0) 4133 return -EINVAL; 4134 4135 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 4136 return -EINVAL; 4137 4138 /* 4139 * Least Significant Eight Bits of (Data Byte Count – 1) 4140 * 1Dh (i.e., Data Byte Count = 30 bytes). 4141 */ 4142 if (sdp->sdp_header.HB2 != 0x1D) 4143 return -EINVAL; 4144 4145 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
*/ 4146 if ((sdp->sdp_header.HB3 & 0x3) != 0) 4147 return -EINVAL; 4148 4149 /* INFOFRAME SDP Version Number */ 4150 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 4151 return -EINVAL; 4152 4153 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4154 if (sdp->db[0] != 1) 4155 return -EINVAL; 4156 4157 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4158 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 4159 return -EINVAL; 4160 4161 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 4162 HDMI_DRM_INFOFRAME_SIZE); 4163 4164 return ret; 4165 } 4166 4167 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 4168 struct intel_crtc_state *crtc_state, 4169 struct drm_dp_vsc_sdp *vsc) 4170 { 4171 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4172 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4173 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4174 unsigned int type = DP_SDP_VSC; 4175 struct dp_sdp sdp = {}; 4176 int ret; 4177 4178 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 4179 if (intel_psr_enabled(intel_dp)) 4180 return; 4181 4182 if ((crtc_state->infoframes.enable & 4183 intel_hdmi_infoframe_enable(type)) == 0) 4184 return; 4185 4186 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 4187 4188 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 4189 4190 if (ret) 4191 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 4192 } 4193 4194 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 4195 struct intel_crtc_state *crtc_state, 4196 struct hdmi_drm_infoframe *drm_infoframe) 4197 { 4198 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4199 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4200 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 4201 struct dp_sdp sdp = {}; 4202 int ret; 4203 4204 if ((crtc_state->infoframes.enable & 4205 intel_hdmi_infoframe_enable(type)) 
== 0) 4206 return; 4207 4208 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 4209 sizeof(sdp)); 4210 4211 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 4212 sizeof(sdp)); 4213 4214 if (ret) 4215 drm_dbg_kms(&dev_priv->drm, 4216 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 4217 } 4218 4219 void intel_read_dp_sdp(struct intel_encoder *encoder, 4220 struct intel_crtc_state *crtc_state, 4221 unsigned int type) 4222 { 4223 if (encoder->type != INTEL_OUTPUT_DDI) 4224 return; 4225 4226 switch (type) { 4227 case DP_SDP_VSC: 4228 intel_read_dp_vsc_sdp(encoder, crtc_state, 4229 &crtc_state->infoframes.vsc); 4230 break; 4231 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4232 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 4233 &crtc_state->infoframes.drm.drm); 4234 break; 4235 default: 4236 MISSING_CASE(type); 4237 break; 4238 } 4239 } 4240 4241 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4242 { 4243 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4244 int status = 0; 4245 int test_link_rate; 4246 u8 test_lane_count, test_link_bw; 4247 /* (DP CTS 1.2) 4248 * 4.3.1.11 4249 */ 4250 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 4251 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 4252 &test_lane_count); 4253 4254 if (status <= 0) { 4255 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 4256 return DP_TEST_NAK; 4257 } 4258 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 4259 4260 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 4261 &test_link_bw); 4262 if (status <= 0) { 4263 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 4264 return DP_TEST_NAK; 4265 } 4266 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 4267 4268 /* Validate the requested link rate and lane count */ 4269 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 4270 test_lane_count)) 4271 return DP_TEST_NAK; 4272 4273 intel_dp->compliance.test_lane_count = 
test_lane_count; 4274 intel_dp->compliance.test_link_rate = test_link_rate; 4275 4276 return DP_TEST_ACK; 4277 } 4278 4279 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 4280 { 4281 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4282 u8 test_pattern; 4283 u8 test_misc; 4284 __be16 h_width, v_height; 4285 int status = 0; 4286 4287 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 4288 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 4289 &test_pattern); 4290 if (status <= 0) { 4291 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 4292 return DP_TEST_NAK; 4293 } 4294 if (test_pattern != DP_COLOR_RAMP) 4295 return DP_TEST_NAK; 4296 4297 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 4298 &h_width, 2); 4299 if (status <= 0) { 4300 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 4301 return DP_TEST_NAK; 4302 } 4303 4304 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 4305 &v_height, 2); 4306 if (status <= 0) { 4307 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 4308 return DP_TEST_NAK; 4309 } 4310 4311 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 4312 &test_misc); 4313 if (status <= 0) { 4314 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 4315 return DP_TEST_NAK; 4316 } 4317 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 4318 return DP_TEST_NAK; 4319 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 4320 return DP_TEST_NAK; 4321 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 4322 case DP_TEST_BIT_DEPTH_6: 4323 intel_dp->compliance.test_data.bpc = 6; 4324 break; 4325 case DP_TEST_BIT_DEPTH_8: 4326 intel_dp->compliance.test_data.bpc = 8; 4327 break; 4328 default: 4329 return DP_TEST_NAK; 4330 } 4331 4332 intel_dp->compliance.test_data.video_pattern = test_pattern; 4333 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4334 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4335 /* Set test active flag here so userspace doesn't interrupt 
things */ 4336 intel_dp->compliance.test_active = true; 4337 4338 return DP_TEST_ACK; 4339 } 4340 4341 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 4342 { 4343 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4344 u8 test_result = DP_TEST_ACK; 4345 struct intel_connector *intel_connector = intel_dp->attached_connector; 4346 struct drm_connector *connector = &intel_connector->base; 4347 4348 if (intel_connector->detect_edid == NULL || 4349 connector->edid_corrupt || 4350 intel_dp->aux.i2c_defer_count > 6) { 4351 /* Check EDID read for NACKs, DEFERs and corruption 4352 * (DP CTS 1.2 Core r1.1) 4353 * 4.2.2.4 : Failed EDID read, I2C_NAK 4354 * 4.2.2.5 : Failed EDID read, I2C_DEFER 4355 * 4.2.2.6 : EDID corruption detected 4356 * Use failsafe mode for all cases 4357 */ 4358 if (intel_dp->aux.i2c_nack_count > 0 || 4359 intel_dp->aux.i2c_defer_count > 0) 4360 drm_dbg_kms(&i915->drm, 4361 "EDID read had %d NACKs, %d DEFERs\n", 4362 intel_dp->aux.i2c_nack_count, 4363 intel_dp->aux.i2c_defer_count); 4364 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 4365 } else { 4366 struct edid *block = intel_connector->detect_edid; 4367 4368 /* We have to write the checksum 4369 * of the last block read 4370 */ 4371 block += intel_connector->detect_edid->extensions; 4372 4373 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 4374 block->checksum) <= 0) 4375 drm_dbg_kms(&i915->drm, 4376 "Failed to write EDID checksum\n"); 4377 4378 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 4379 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 4380 } 4381 4382 /* Set test active flag here so userspace doesn't interrupt things */ 4383 intel_dp->compliance.test_active = true; 4384 4385 return test_result; 4386 } 4387 4388 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 4389 const struct intel_crtc_state *crtc_state) 4390 { 4391 struct drm_i915_private *dev_priv = 4392 
to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4393 struct drm_dp_phy_test_params *data = 4394 &intel_dp->compliance.test_data.phytest; 4395 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4396 enum pipe pipe = crtc->pipe; 4397 u32 pattern_val; 4398 4399 switch (data->phy_pattern) { 4400 case DP_PHY_TEST_PATTERN_NONE: 4401 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 4402 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 4403 break; 4404 case DP_PHY_TEST_PATTERN_D10_2: 4405 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 4406 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4407 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 4408 break; 4409 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 4410 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 4411 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4412 DDI_DP_COMP_CTL_ENABLE | 4413 DDI_DP_COMP_CTL_SCRAMBLED_0); 4414 break; 4415 case DP_PHY_TEST_PATTERN_PRBS7: 4416 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 4417 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4418 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 4419 break; 4420 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 4421 /* 4422 * FIXME: Ideally pattern should come from DPCD 0x250. As 4423 * current firmware of DPR-100 could not set it, so hardcoding 4424 * now for complaince test. 4425 */ 4426 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 4427 pattern_val = 0x3e0f83e0; 4428 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 4429 pattern_val = 0x0f83e0f8; 4430 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 4431 pattern_val = 0x0000f83e; 4432 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 4433 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4434 DDI_DP_COMP_CTL_ENABLE | 4435 DDI_DP_COMP_CTL_CUSTOM80); 4436 break; 4437 case DP_PHY_TEST_PATTERN_CP2520: 4438 /* 4439 * FIXME: Ideally pattern should come from DPCD 0x24A. 
As 4440 * current firmware of DPR-100 could not set it, so hardcoding 4441 * now for complaince test. 4442 */ 4443 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 4444 pattern_val = 0xFB; 4445 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 4446 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 4447 pattern_val); 4448 break; 4449 default: 4450 WARN(1, "Invalid Phy Test Pattern\n"); 4451 } 4452 } 4453 4454 static void 4455 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, 4456 const struct intel_crtc_state *crtc_state) 4457 { 4458 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4459 struct drm_device *dev = dig_port->base.base.dev; 4460 struct drm_i915_private *dev_priv = to_i915(dev); 4461 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 4462 enum pipe pipe = crtc->pipe; 4463 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 4464 4465 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 4466 TRANS_DDI_FUNC_CTL(pipe)); 4467 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 4468 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 4469 4470 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 4471 TGL_TRANS_DDI_PORT_MASK); 4472 trans_conf_value &= ~PIPECONF_ENABLE; 4473 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 4474 4475 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 4476 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 4477 trans_ddi_func_ctl_value); 4478 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 4479 } 4480 4481 static void 4482 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, 4483 const struct intel_crtc_state *crtc_state) 4484 { 4485 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4486 struct drm_device *dev = dig_port->base.base.dev; 4487 struct drm_i915_private *dev_priv = to_i915(dev); 4488 enum port port = dig_port->base.port; 4489 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 4490 enum pipe 
pipe = crtc->pipe; 4491 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 4492 4493 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 4494 TRANS_DDI_FUNC_CTL(pipe)); 4495 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 4496 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 4497 4498 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 4499 TGL_TRANS_DDI_SELECT_PORT(port); 4500 trans_conf_value |= PIPECONF_ENABLE; 4501 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 4502 4503 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 4504 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 4505 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 4506 trans_ddi_func_ctl_value); 4507 } 4508 4509 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 4510 const struct intel_crtc_state *crtc_state) 4511 { 4512 struct drm_dp_phy_test_params *data = 4513 &intel_dp->compliance.test_data.phytest; 4514 u8 link_status[DP_LINK_STATUS_SIZE]; 4515 4516 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 4517 link_status) < 0) { 4518 DRM_DEBUG_KMS("failed to get link status\n"); 4519 return; 4520 } 4521 4522 /* retrieve vswing & pre-emphasis setting */ 4523 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 4524 link_status); 4525 4526 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); 4527 4528 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 4529 4530 intel_dp_phy_pattern_update(intel_dp, crtc_state); 4531 4532 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); 4533 4534 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 4535 link_status[DP_DPCD_REV]); 4536 } 4537 4538 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 4539 { 4540 struct drm_dp_phy_test_params *data = 4541 &intel_dp->compliance.test_data.phytest; 4542 4543 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 4544 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 4545 return 
DP_TEST_NAK; 4546 } 4547 4548 /* Set test active flag here so userspace doesn't interrupt things */ 4549 intel_dp->compliance.test_active = true; 4550 4551 return DP_TEST_ACK; 4552 } 4553 4554 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 4555 { 4556 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4557 u8 response = DP_TEST_NAK; 4558 u8 request = 0; 4559 int status; 4560 4561 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 4562 if (status <= 0) { 4563 drm_dbg_kms(&i915->drm, 4564 "Could not read test request from sink\n"); 4565 goto update_status; 4566 } 4567 4568 switch (request) { 4569 case DP_TEST_LINK_TRAINING: 4570 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 4571 response = intel_dp_autotest_link_training(intel_dp); 4572 break; 4573 case DP_TEST_LINK_VIDEO_PATTERN: 4574 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 4575 response = intel_dp_autotest_video_pattern(intel_dp); 4576 break; 4577 case DP_TEST_LINK_EDID_READ: 4578 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 4579 response = intel_dp_autotest_edid(intel_dp); 4580 break; 4581 case DP_TEST_LINK_PHY_TEST_PATTERN: 4582 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 4583 response = intel_dp_autotest_phy_pattern(intel_dp); 4584 break; 4585 default: 4586 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 4587 request); 4588 break; 4589 } 4590 4591 if (response & DP_TEST_ACK) 4592 intel_dp->compliance.test_type = request; 4593 4594 update_status: 4595 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 4596 if (status <= 0) 4597 drm_dbg_kms(&i915->drm, 4598 "Could not write test response to sink\n"); 4599 } 4600 4601 static void 4602 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) 4603 { 4604 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); 4605 4606 if (esi[1] & DP_CP_IRQ) { 4607 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 4608 *handled = true; 4609 } 
4610 } 4611 4612 /** 4613 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 4614 * @intel_dp: Intel DP struct 4615 * 4616 * Read any pending MST interrupts, call MST core to handle these and ack the 4617 * interrupts. Check if the main and AUX link state is ok. 4618 * 4619 * Returns: 4620 * - %true if pending interrupts were serviced (or no interrupts were 4621 * pending) w/o detecting an error condition. 4622 * - %false if an error condition - like AUX failure or a loss of link - is 4623 * detected, which needs servicing from the hotplug work. 4624 */ 4625 static bool 4626 intel_dp_check_mst_status(struct intel_dp *intel_dp) 4627 { 4628 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4629 bool link_ok = true; 4630 4631 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 4632 4633 for (;;) { 4634 u8 esi[DP_DPRX_ESI_LEN] = {}; 4635 bool handled; 4636 int retry; 4637 4638 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 4639 drm_dbg_kms(&i915->drm, 4640 "failed to get ESI - device may have failed\n"); 4641 link_ok = false; 4642 4643 break; 4644 } 4645 4646 /* check link status - esi[10] = 0x200c */ 4647 if (intel_dp->active_mst_links > 0 && link_ok && 4648 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 4649 drm_dbg_kms(&i915->drm, 4650 "channel EQ not ok, retraining\n"); 4651 link_ok = false; 4652 } 4653 4654 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 4655 4656 intel_dp_mst_hpd_irq(intel_dp, esi, &handled); 4657 4658 if (!handled) 4659 break; 4660 4661 for (retry = 0; retry < 3; retry++) { 4662 int wret; 4663 4664 wret = drm_dp_dpcd_write(&intel_dp->aux, 4665 DP_SINK_COUNT_ESI+1, 4666 &esi[1], 3); 4667 if (wret == 3) 4668 break; 4669 } 4670 } 4671 4672 return link_ok; 4673 } 4674 4675 static void 4676 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) 4677 { 4678 bool is_active; 4679 u8 buf = 0; 4680 4681 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); 4682 if 
(intel_dp->frl.is_trained && !is_active) { 4683 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0) 4684 return; 4685 4686 buf &= ~DP_PCON_ENABLE_HDMI_LINK; 4687 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0) 4688 return; 4689 4690 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); 4691 4692 /* Restart FRL training or fall back to TMDS mode */ 4693 intel_dp_check_frl_training(intel_dp); 4694 } 4695 } 4696 4697 static bool 4698 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 4699 { 4700 u8 link_status[DP_LINK_STATUS_SIZE]; 4701 4702 if (!intel_dp->link_trained) 4703 return false; 4704 4705 /* 4706 * While PSR source HW is enabled, it will control main-link sending 4707 * frames, enabling and disabling it so trying to do a retrain will fail 4708 * as the link would or not be on or it could mix training patterns 4709 * and frame data at the same time causing retrain to fail. 4710 * Also when exiting PSR, HW will retrain the link anyways fixing 4711 * any link status error. 4712 */ 4713 if (intel_psr_enabled(intel_dp)) 4714 return false; 4715 4716 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 4717 link_status) < 0) 4718 return false; 4719 4720 /* 4721 * Validate the cached values of intel_dp->link_rate and 4722 * intel_dp->lane_count before attempting to retrain. 4723 * 4724 * FIXME would be nice to user the crtc state here, but since 4725 * we need to call this from the short HPD handler that seems 4726 * a bit hard. 
4727 */ 4728 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 4729 intel_dp->lane_count)) 4730 return false; 4731 4732 /* Retrain if Channel EQ or CR not ok */ 4733 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 4734 } 4735 4736 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 4737 const struct drm_connector_state *conn_state) 4738 { 4739 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4740 struct intel_encoder *encoder; 4741 enum pipe pipe; 4742 4743 if (!conn_state->best_encoder) 4744 return false; 4745 4746 /* SST */ 4747 encoder = &dp_to_dig_port(intel_dp)->base; 4748 if (conn_state->best_encoder == &encoder->base) 4749 return true; 4750 4751 /* MST */ 4752 for_each_pipe(i915, pipe) { 4753 encoder = &intel_dp->mst_encoders[pipe]->base; 4754 if (conn_state->best_encoder == &encoder->base) 4755 return true; 4756 } 4757 4758 return false; 4759 } 4760 4761 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 4762 struct drm_modeset_acquire_ctx *ctx, 4763 u32 *crtc_mask) 4764 { 4765 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4766 struct drm_connector_list_iter conn_iter; 4767 struct intel_connector *connector; 4768 int ret = 0; 4769 4770 *crtc_mask = 0; 4771 4772 if (!intel_dp_needs_link_retrain(intel_dp)) 4773 return 0; 4774 4775 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 4776 for_each_intel_connector_iter(connector, &conn_iter) { 4777 struct drm_connector_state *conn_state = 4778 connector->base.state; 4779 struct intel_crtc_state *crtc_state; 4780 struct intel_crtc *crtc; 4781 4782 if (!intel_dp_has_connector(intel_dp, conn_state)) 4783 continue; 4784 4785 crtc = to_intel_crtc(conn_state->crtc); 4786 if (!crtc) 4787 continue; 4788 4789 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 4790 if (ret) 4791 break; 4792 4793 crtc_state = to_intel_crtc_state(crtc->base.state); 4794 4795 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 4796 4797 if 
(!crtc_state->hw.active) 4798 continue; 4799 4800 if (conn_state->commit && 4801 !try_wait_for_completion(&conn_state->commit->hw_done)) 4802 continue; 4803 4804 *crtc_mask |= drm_crtc_mask(&crtc->base); 4805 } 4806 drm_connector_list_iter_end(&conn_iter); 4807 4808 if (!intel_dp_needs_link_retrain(intel_dp)) 4809 *crtc_mask = 0; 4810 4811 return ret; 4812 } 4813 4814 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 4815 { 4816 struct intel_connector *connector = intel_dp->attached_connector; 4817 4818 return connector->base.status == connector_status_connected || 4819 intel_dp->is_mst; 4820 } 4821 4822 int intel_dp_retrain_link(struct intel_encoder *encoder, 4823 struct drm_modeset_acquire_ctx *ctx) 4824 { 4825 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4826 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4827 struct intel_crtc *crtc; 4828 u32 crtc_mask; 4829 int ret; 4830 4831 if (!intel_dp_is_connected(intel_dp)) 4832 return 0; 4833 4834 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 4835 ctx); 4836 if (ret) 4837 return ret; 4838 4839 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 4840 if (ret) 4841 return ret; 4842 4843 if (crtc_mask == 0) 4844 return 0; 4845 4846 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 4847 encoder->base.base.id, encoder->base.name); 4848 4849 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 4850 const struct intel_crtc_state *crtc_state = 4851 to_intel_crtc_state(crtc->base.state); 4852 4853 /* Suppress underruns caused by re-training */ 4854 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 4855 if (crtc_state->has_pch_encoder) 4856 intel_set_pch_fifo_underrun_reporting(dev_priv, 4857 intel_crtc_pch_transcoder(crtc), false); 4858 } 4859 4860 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 4861 const struct intel_crtc_state *crtc_state = 4862 to_intel_crtc_state(crtc->base.state); 4863 4864 /* retrain on 
the MST master transcoder */ 4865 if (INTEL_GEN(dev_priv) >= 12 && 4866 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 4867 !intel_dp_mst_is_master_trans(crtc_state)) 4868 continue; 4869 4870 intel_dp_check_frl_training(intel_dp); 4871 intel_dp_pcon_dsc_configure(intel_dp, crtc_state); 4872 intel_dp_start_link_train(intel_dp, crtc_state); 4873 intel_dp_stop_link_train(intel_dp, crtc_state); 4874 break; 4875 } 4876 4877 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 4878 const struct intel_crtc_state *crtc_state = 4879 to_intel_crtc_state(crtc->base.state); 4880 4881 /* Keep underrun reporting disabled until things are stable */ 4882 intel_wait_for_vblank(dev_priv, crtc->pipe); 4883 4884 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 4885 if (crtc_state->has_pch_encoder) 4886 intel_set_pch_fifo_underrun_reporting(dev_priv, 4887 intel_crtc_pch_transcoder(crtc), true); 4888 } 4889 4890 return 0; 4891 } 4892 4893 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, 4894 struct drm_modeset_acquire_ctx *ctx, 4895 u32 *crtc_mask) 4896 { 4897 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4898 struct drm_connector_list_iter conn_iter; 4899 struct intel_connector *connector; 4900 int ret = 0; 4901 4902 *crtc_mask = 0; 4903 4904 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 4905 for_each_intel_connector_iter(connector, &conn_iter) { 4906 struct drm_connector_state *conn_state = 4907 connector->base.state; 4908 struct intel_crtc_state *crtc_state; 4909 struct intel_crtc *crtc; 4910 4911 if (!intel_dp_has_connector(intel_dp, conn_state)) 4912 continue; 4913 4914 crtc = to_intel_crtc(conn_state->crtc); 4915 if (!crtc) 4916 continue; 4917 4918 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 4919 if (ret) 4920 break; 4921 4922 crtc_state = to_intel_crtc_state(crtc->base.state); 4923 4924 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 4925 4926 if (!crtc_state->hw.active) 4927 continue; 4928 4929 
if (conn_state->commit && 4930 !try_wait_for_completion(&conn_state->commit->hw_done)) 4931 continue; 4932 4933 *crtc_mask |= drm_crtc_mask(&crtc->base); 4934 } 4935 drm_connector_list_iter_end(&conn_iter); 4936 4937 return ret; 4938 } 4939 4940 static int intel_dp_do_phy_test(struct intel_encoder *encoder, 4941 struct drm_modeset_acquire_ctx *ctx) 4942 { 4943 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4944 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4945 struct intel_crtc *crtc; 4946 u32 crtc_mask; 4947 int ret; 4948 4949 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 4950 ctx); 4951 if (ret) 4952 return ret; 4953 4954 ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask); 4955 if (ret) 4956 return ret; 4957 4958 if (crtc_mask == 0) 4959 return 0; 4960 4961 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", 4962 encoder->base.base.id, encoder->base.name); 4963 4964 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 4965 const struct intel_crtc_state *crtc_state = 4966 to_intel_crtc_state(crtc->base.state); 4967 4968 /* test on the MST master transcoder */ 4969 if (INTEL_GEN(dev_priv) >= 12 && 4970 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 4971 !intel_dp_mst_is_master_trans(crtc_state)) 4972 continue; 4973 4974 intel_dp_process_phy_request(intel_dp, crtc_state); 4975 break; 4976 } 4977 4978 return 0; 4979 } 4980 4981 void intel_dp_phy_test(struct intel_encoder *encoder) 4982 { 4983 struct drm_modeset_acquire_ctx ctx; 4984 int ret; 4985 4986 drm_modeset_acquire_init(&ctx, 0); 4987 4988 for (;;) { 4989 ret = intel_dp_do_phy_test(encoder, &ctx); 4990 4991 if (ret == -EDEADLK) { 4992 drm_modeset_backoff(&ctx); 4993 continue; 4994 } 4995 4996 break; 4997 } 4998 4999 drm_modeset_drop_locks(&ctx); 5000 drm_modeset_acquire_fini(&ctx); 5001 drm_WARN(encoder->base.dev, ret, 5002 "Acquiring modeset locks failed with %i\n", ret); 5003 } 5004 5005 /* 5006 * If display is now connected check 
links status,
 * there has been known issues of link loss triggering
 * long pulse.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	if (intel_dp->compliance.test_active &&
	    intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
		intel_dp_phy_test(encoder);
		/* just do the PHY test and nothing else */
		return INTEL_HOTPLUG_UNCHANGED;
	}

	state = intel_encoder_hotplug(encoder, connector);

	/* Retrain the link under the modeset locks, retrying on -EDEADLK */
	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}

/*
 * Read and dispatch DP_DEVICE_SERVICE_IRQ_VECTOR: automated test
 * requests, HDCP CP_IRQ and sink specific IRQs. The read value is
 * written back, which per the DP spec clears the serviced bits in the
 * sink. Only meaningful on DPCD 1.1+.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	/* a short read or an all-zero vector means nothing to service */
	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * Read and acknowledge DP_LINK_SERVICE_IRQ_VECTOR_ESI0; currently only
 * the HDMI (PCON) link status change event is acted upon.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
		drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
		return;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
		drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
		return;
	}

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}

/*
 * According to DP spec
 * 5.1.2:
 * 1. Read DPCD
 * 2. Configure link according to Receiver Capabilities
 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
 * 4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
*
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	/* compliance.test_type was filled in by the IRQ handling above */
	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP is handled by edp_detect(); it must never get here */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* pre-1.1 DPCD: downstream port type lives in a coarser field */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

static enum
drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	/* eDP panels are permanently attached; always report connected */
	return connector_status_connected;
}

/* HPD live state from the south display engine interrupt status (SDEISR) */
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, SDEISR) & bit;
}

/* HPD live state for G4x: per-port bits in PORT_HOTPLUG_STAT */
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

/* HPD live state for GM45: same register as G4x, different bit layout */
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

/* HPD live state from the north display engine interrupt status (DEISR) */
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* the platform-specific ->connected() hook needs display power */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return the connector's EDID: a duplicate of the cached/override EDID
 * when one is set (NULL if the cached value marks the EDID as invalid),
 * otherwise a fresh read over DDC.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

/*
 * Cache the downstream facing port (DFP) capabilities used for mode
 * validation: max bpc, max dotclock, TMDS clock range and PCON max FRL
 * bandwidth.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Work out which YCbCr 4:2:0 strategy is usable (4:2:0 passthrough,
 * 4:4:4->4:2:0 conversion or RGB->YCbCr conversion in a DFP/PCON) and
 * update the connector's ycbcr_420_allowed flag accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_GEN(i915, 5))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);

	if (INTEL_GEN(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
			/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
		}
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(intel_dp->dfp.rgb_to_ycbcr),
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * (Re)read the EDID, cache it on the connector and refresh all EDID
 * derived state (DFP caps, 4:2:0 support, HDMI sink/audio flags, CEC).
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}

/* Drop the cached EDID and reset all state derived from it */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}

/* .detect_ctx hook: full connector detection, including DPCD and EDID */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* forget stale compliance/DSC state from the previous sink */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/*
 * .force hook: re-read the EDID for an already-connected connector,
 * holding an AUX power domain reference for the duration of the read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}

/* .get_modes hook: EDID modes, the eDP fixed mode, or a DFP fallback mode */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;
	int num_modes = 0;

	edid = intel_connector->detect_edid;
	if (edid) {
		num_modes = intel_connector_update_modes(connector, edid);

		if (intel_vrr_is_capable(connector))
			drm_connector_set_vrr_capable_property(connector,
							       true);
	}

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	if
(num_modes)
		return num_modes;

	/* No EDID modes at all: fall back to a mode advertised by the DFP */
	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}

/*
 * .late_register hook: register the AUX channel and CEC, and when an
 * LSPCON is present, initialize it and attach its HDR metadata property.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_object_attach_property(&connector->base,
						   connector->dev->mode_config.hdr_output_metadata_property,
						   0);
	}

	return ret;
}

/* .early_unregister hook: tear down CEC and the AUX channel */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

/* Flush/cleanup MST, VDD and AUX state ahead of encoder destruction */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}

/* Make sure panel VDD is off before suspending */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);
}

/* Wait out the panel power cycle delay on shutdown */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Return the pipe currently driving this DP port according to the port
 * register, or INVALID_PIPE when the port is disabled.
 */
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}

void
intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));

	/* re-read the port register so the cached value matches the hw */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	intel_dp->reset_link_params = true;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		/* pps.active_pipe is protected by the PPS lock */
		with_intel_pps_lock(intel_dp, wakeref)
			intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
	}

	intel_pps_encoder_reset(intel_dp);
}

/*
 * Pull every connector of the given tile group (and the planes of their
 * CRTCs) into the atomic state, marking those CRTCs for a modeset.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Pull every enabled CRTC whose transcoder is in @transcoders into the
 * atomic state and mark it for a modeset.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	/* every requested transcoder should have been found above */
	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * For a connector on a port-synced CRTC, pull the master and all slave
 * transcoders into the atomic state for a modeset.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/* .atomic_check hook: standard check plus tile-group/port-sync handling */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret =
intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

/*
 * HPD IRQ handler for DP ports: returns IRQ_NONE when a full detection
 * cycle is still needed (long pulse, MST/short-pulse handling failed),
 * IRQ_HANDLED otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		/* full detection must renegotiate the link parameters */
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	/* port A is always eDP before gen9 */
	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

/* Attach all connector properties applicable to this platform and port */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_is_lspcon_present(dev_priv, port)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 *
@dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	/* drrs.dp is only set while DRRS is enabled on some eDP panel. */
	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	/*
	 * Requesting exactly the panel's downclock refresh rate selects the
	 * low-RR M/N set; anything else is treated as high RR.
	 */
	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
	    refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		/* Gen8+ (except CHV): switch RR by selecting the M/N set. */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		/*
		 * Gen7/VLV/CHV: toggle the eDP RR mode switch bit in
		 * PIPECONF (VLV/CHV use a different bit position).
		 */
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}

/* Caller must hold dev_priv->drrs.mutex. */
static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs)
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");

	mutex_lock(&dev_priv->drrs.mutex);

	/* Only one eDP panel can own DRRS at a time. */
	if (dev_priv->drrs.dp) {
		drm_warn(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	intel_edp_drrs_enable_locked(intel_dp);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/* Caller must hold dev_priv->drrs.mutex. */
static void
intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* Return to the panel's fixed (high) refresh rate before tearing down. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		int refresh;

		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
	}

	dev_priv->drrs.dp = NULL;
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 *
 * Switches back to the fixed refresh rate (if needed), clears drrs.dp and
 * cancels any pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Outside the mutex: the work itself takes drrs.mutex, so a _sync
	 * cancel under the lock would deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

/**
 * intel_edp_drrs_update - Update DRRS state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update DRRS states, disabling or enabling DRRS when
 * executing fastsets. For full modeset, intel_edp_drrs_disable() and
 * intel_edp_drrs_enable() should be called instead.
 */
void
intel_edp_drrs_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	mutex_lock(&dev_priv->drrs.mutex);

	/* New state matches current one? */
	if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
		goto unlock;

	if (crtc_state->has_drrs)
		intel_edp_drrs_enable_locked(intel_dp);
	else
		intel_edp_drrs_disable_locked(intel_dp, crtc_state);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/*
 * Delayed work that downclocks to the low refresh rate after the screen has
 * been idle (no frontbuffer activity) for the scheduling delay.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Screen is no longer idle: stop any pending downclock. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track frontbuffer bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * 
@frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Rendering on these planes finished; clear their busy bits. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenario.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation. When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
6459 */ 6460 static struct drm_display_mode * 6461 intel_dp_drrs_init(struct intel_connector *connector, 6462 struct drm_display_mode *fixed_mode) 6463 { 6464 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 6465 struct drm_display_mode *downclock_mode = NULL; 6466 6467 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 6468 mutex_init(&dev_priv->drrs.mutex); 6469 6470 if (INTEL_GEN(dev_priv) <= 6) { 6471 drm_dbg_kms(&dev_priv->drm, 6472 "DRRS supported for Gen7 and above\n"); 6473 return NULL; 6474 } 6475 6476 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 6477 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); 6478 return NULL; 6479 } 6480 6481 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); 6482 if (!downclock_mode) { 6483 drm_dbg_kms(&dev_priv->drm, 6484 "Downclock mode is not found. DRRS not supported\n"); 6485 return NULL; 6486 } 6487 6488 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 6489 6490 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 6491 drm_dbg_kms(&dev_priv->drm, 6492 "seamless DRRS supported for eDP panel.\n"); 6493 return downclock_mode; 6494 } 6495 6496 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 6497 struct intel_connector *intel_connector) 6498 { 6499 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6500 struct drm_device *dev = &dev_priv->drm; 6501 struct drm_connector *connector = &intel_connector->base; 6502 struct drm_display_mode *fixed_mode = NULL; 6503 struct drm_display_mode *downclock_mode = NULL; 6504 bool has_dpcd; 6505 enum pipe pipe = INVALID_PIPE; 6506 struct edid *edid; 6507 6508 if (!intel_dp_is_edp(intel_dp)) 6509 return true; 6510 6511 /* 6512 * On IBX/CPT we may get here with LVDS already registered. Since the 6513 * driver uses the only internal power sequencer available for both 6514 * eDP and LVDS bail out early in this case to prevent interfering 6515 * with an already powered-on LVDS power sequencer. 
6516 */ 6517 if (intel_get_lvds_encoder(dev_priv)) { 6518 drm_WARN_ON(dev, 6519 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 6520 drm_info(&dev_priv->drm, 6521 "LVDS was detected, not registering eDP\n"); 6522 6523 return false; 6524 } 6525 6526 intel_pps_init(intel_dp); 6527 6528 /* Cache DPCD and EDID for edp. */ 6529 has_dpcd = intel_edp_init_dpcd(intel_dp); 6530 6531 if (!has_dpcd) { 6532 /* if this fails, presume the device is a ghost */ 6533 drm_info(&dev_priv->drm, 6534 "failed to retrieve link info, disabling eDP\n"); 6535 goto out_vdd_off; 6536 } 6537 6538 mutex_lock(&dev->mode_config.mutex); 6539 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 6540 if (edid) { 6541 if (drm_add_edid_modes(connector, edid)) { 6542 drm_connector_update_edid_property(connector, edid); 6543 } else { 6544 kfree(edid); 6545 edid = ERR_PTR(-EINVAL); 6546 } 6547 } else { 6548 edid = ERR_PTR(-ENOENT); 6549 } 6550 intel_connector->edid = edid; 6551 6552 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 6553 if (fixed_mode) 6554 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 6555 6556 /* multiply the mode clock and horizontal timings for MSO */ 6557 intel_edp_mso_mode_fixup(intel_connector, fixed_mode); 6558 intel_edp_mso_mode_fixup(intel_connector, downclock_mode); 6559 6560 /* fallback to VBT if available for eDP */ 6561 if (!fixed_mode) 6562 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 6563 mutex_unlock(&dev->mode_config.mutex); 6564 6565 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 6566 /* 6567 * Figure out the current pipe for the initial backlight setup. 6568 * If the current pipe isn't valid, try the PPS pipe, and if that 6569 * fails just assume pipe A. 
6570 */ 6571 pipe = vlv_active_pipe(intel_dp); 6572 6573 if (pipe != PIPE_A && pipe != PIPE_B) 6574 pipe = intel_dp->pps.pps_pipe; 6575 6576 if (pipe != PIPE_A && pipe != PIPE_B) 6577 pipe = PIPE_A; 6578 6579 drm_dbg_kms(&dev_priv->drm, 6580 "using pipe %c for initial backlight setup\n", 6581 pipe_name(pipe)); 6582 } 6583 6584 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 6585 intel_connector->panel.backlight.power = intel_pps_backlight_power; 6586 intel_panel_setup_backlight(connector, pipe); 6587 6588 if (fixed_mode) { 6589 drm_connector_set_panel_orientation_with_quirk(connector, 6590 dev_priv->vbt.orientation, 6591 fixed_mode->hdisplay, fixed_mode->vdisplay); 6592 } 6593 6594 return true; 6595 6596 out_vdd_off: 6597 intel_pps_vdd_off_sync(intel_dp); 6598 6599 return false; 6600 } 6601 6602 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 6603 { 6604 struct intel_connector *intel_connector; 6605 struct drm_connector *connector; 6606 6607 intel_connector = container_of(work, typeof(*intel_connector), 6608 modeset_retry_work); 6609 connector = &intel_connector->base; 6610 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 6611 connector->name); 6612 6613 /* Grab the locks before changing connector property*/ 6614 mutex_lock(&connector->dev->mode_config.mutex); 6615 /* Set connector link status to BAD and send a Uevent to notify 6616 * userspace to do a modeset. 
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

/*
 * Create and wire up the DP/eDP connector for @dig_port: connector funcs,
 * AUX channel, MST, eDP panel init, properties, HDCP and PSR.
 * Returns false on failure, after cleaning up the partially initialized
 * connector.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

/*
 * Register a DP encoder + connector on @port (pre-DDI platforms), wiring
 * up the platform-specific enable/disable, link training and signal level
 * vfuncs.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->sync_state = intel_dp_sync_state;
	intel_encoder->initial_fastset_check = 
intel_dp_initial_fastset_check;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	intel_encoder->shutdown = intel_dp_encoder_shutdown;
	/* Platform-specific modeset sequence hooks. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	/* Link training register layout: CPT-style vs original g4x-style. */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	/* Max vswing/pre-emphasis levels the hardware can drive. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		/* On CHV, port D is wired to pipe C only. */
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	/* Platform-specific "is the port physically connected" probe. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

	/* Unwind in reverse order of acquisition. */
err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}

/* Suspend the MST topology managers of all active MST-capable DDI ports. */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/*
 * Resume the MST topology managers of all MST-capable DDI ports; a port
 * whose topology fails to resume is demoted out of MST mode.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			/* Topology gone or unresponsive: fall back to SST. */
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}