1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "g4x_dp.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpll.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"

/* Number of DPRX ESI bytes read per sink IRQ (MST event handling) */
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)


/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe
 * configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	/* eDP panels are modelled as encoders of type INTEL_OUTPUT_EDP */
	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	/* Standard DP link rates in kHz, ascending: RBR, HBR, HBR2, HBR3 */
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/* Sink's max rate, further capped by any LTTPR in the link */
	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	/* Copy all standard rates up to and including max_rate */
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate.
 */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		/* rates[] is ascending; scan from the largest entry down */
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is ascending, so the last entry is the maximum */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	/* An LTTPR between source and sink may further limit the lane count */
	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

/* Current max lane count, possibly reduced by link training fallback */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

/* Bandwidth (kBytes/s) a mode needs: pixel_clock (kHz) * bpp / 8 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps.
	 * Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

/* Can this platform/port drive a mode that needs two joined pipes? */
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* Display version 12+ on any port; version 11 on all ports but A */
	return DISPLAY_VER(dev_priv) >= 12 ||
		(IS_DISPLAY_VER(dev_priv, 11) &&
		 encoder->port != PORT_A);
}

/* Max source link rate (kHz) on CNL, limited by SKU voltage and port */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

/* Max source link rate (kHz) on ICL: combo PHY DP (non-eDP) caps at HBR2 */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

/* Max source link rate (kHz) on JSL/EHL: eDP caps at HBR2 */
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

/*
 * Pick the platform's table of supported source link rates (kHz) and clamp
 * it by the platform/SKU max rate and the VBT-provided max rate. Called once
 * at init; results land in intel_dp->source_rates/num_source_rates.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_DISPLAY_VER(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Clamp further by the VBT limit, when one is provided */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

/*
 * Intersect two ascending rate arrays into common_rates; returns the number
 * of entries written (capped at DP_MAX_SUPPORTED_RATES).
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	/* Classic merge-style intersection of two sorted arrays */
	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

/* Compute common_rates = source_rates ∩ sink_rates; fall back to RBR if empty */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

/* Sanity-check a (link_rate, lane_count) pair against the current maxima */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

/*
 * Check that the fixed eDP panel mode still fits in the bandwidth that a
 * reduced (link_rate, lane_count) pair would provide, assuming 18 bpp.
 */
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

/*
 * Degrade the link parameters after a failed link training: first drop to
 * the next lower common rate, then halve the lane count. Returns 0 when a
 * retry is worthwhile, -1 when no further fallback is possible.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	/* For eDP, first retry with the max parameters before degrading */
	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* There is a lower common rate to fall back to */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: halve the lane count instead */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

/* Scale a mode clock by the FEC overhead factor (1/0.972261) */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

/* Small joiner RAM size in bits: 7680 bytes on display ver 11+, else 6144 */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

/*
 * Compute the largest valid DSC output bpp (U6.4 fixed point) that fits both
 * the available link bandwidth and the (small/big) joiner RAM limits.
 * Returns 0 when even the smallest valid bpp does not fit.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz.
	   width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
				   mode_hdisplay;

	/* With big joiner, each of the two joined pipes gets its own RAM */
	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	/* Round down to the largest valid bpp not exceeding the computed one */
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

/*
 * Pick the smallest valid DSC slice count that satisfies both the encoder
 * throughput limit and the sink's max slice width/count. Returns 0 when no
 * valid slice count exists.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Slices needed so each stays within the encoder throughput limit */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Big joiner doubles the candidate count (shift by bool) */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

/*
 * Decide the output format for a mode: RGB unless the mode is 4:2:0-only,
 * in which case use whichever YCbCr form the DFP conversion support allows.
 */
static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	/* DFP can convert RGB->YCbCr and 4:4:4->4:2:0: output RGB, let it convert */
	if (intel_dp->dfp.rgb_to_ycbcr &&
	    intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

/* Minimum pipe bpp: 6 bpc for RGB, 8 bpc otherwise */
int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
	 * format of the number of bytes per pixel will be half the number
	 * of bytes of RGB pixel.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Minimum output bpp for a mode, accounting for its output format */
static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

/*
 * Validate a mode against the downstream facing port (DFP) limits:
 * PCON FRL bandwidth, DFP dotclock, and DFP TMDS clock range.
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/*
	   Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	/* 4:2:0 halves the TMDS clock relative to the dotclock */
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/*
 * Connector .mode_valid() hook: check a mode against panel fixed mode,
 * dotclock, link bandwidth (with DSC/big joiner where available), and
 * downstream port limits.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	/* eDP panels only support their fixed mode's resolution/clock */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay != fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay != fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Big joiner doubles the available dotclock when supported */
	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if (DISPLAY_VER(dev_priv) >= 10 &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}

/* Source supports HBR2 (5.4 GHz) iff its top rate reaches 540000 kHz */
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

/* Source supports HBR3 (8.1 GHz) iff its top rate reaches 810000 kHz */
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

/* Format an int array as a comma-separated string; silently truncates */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		/* stop on truncation (snprintf returns the would-be length) */
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

/* Dump source/sink/common rate arrays to the KMS debug log */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

/* Highest usable common rate, capped by the current max_link_rate */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;	/* fall back to RBR */

	return intel_dp->common_rates[len - 1];
}

/* Index of @rate in the sink rate table (for eDP 1.4 rate select) */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

/*
 * Translate a port clock into the DPCD programming pair: either a link
 * bandwidth code, or (for eDP 1.4 rate select) a sink rate table index.
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method.
	 */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

/* Does the source support FEC on the transcoder chosen for this state? */
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (DISPLAY_VER(dev_priv) >= 12)
		return true;

	/* Display ver 11: all transcoders except A */
	if (IS_DISPLAY_VER(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

/* FEC is usable iff both source and sink support it */
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

/* DSC is usable iff source and sink support it; DP (non-eDP) also needs FEC */
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

/* Will the HDMI DFP output end up as YCbCr 4:2:0 on the wire? */
static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

/* TMDS clock (kHz) the HDMI DFP will run at for the given bpc */
static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	/* 4:2:0 halves the TMDS clock */
	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

/* Check the resulting TMDS clock against the DFP's min/max limits */
static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

/* Can the HDMI DFP do deep color at this bpc (sink caps + TMDS limits)? */
static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{

	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}

/*
 * Max pipe bpp for this state: start from pipe_bpp, clamp by the DFP max
 * bpc and TMDS limits, then by a VBT-provided eDP bpp when the EDID gives
 * no bpc.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		/* Step down from deep color until the TMDS clock fits */
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests.
 */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin bpp to the test value; disable dithering at 6 bpc */
		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	/* Try the highest bpp first, stepping down 2 bpc (6 bpp) at a time */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		/* Prefer a lower clock over fewer lanes */
		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

/* Optimize link config in order: max bpp, min lanes, min clock */
static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		/* Unlike the wide variant, prefer fewer lanes over lower clock */
		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

/* Pick the pipe bpp for DSC from the sink's supported input bpcs (continues below) */
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i
= 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

#define DSC_SUPPORTED_VERSION_MIN		1

/*
 * Fill crtc_state->dsc.config from the sink's DSC DPCD (DSC version, line
 * buffer depth, block prediction, RGB conversion capability) and the mode
 * (slice height), then compute the rate control parameters.
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* Cap the minor version at what the driver supports. */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* DSC 1.2 encodes its maximum line buffer depth with a special value. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}

/*
 * Compute a DSC-enabled link configuration: pick the DSC input bpp, the
 * compressed bpp and slice count, decide whether two VDSC engines are
 * needed, and compute the DSC parameters.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is only enabled together with DSC on external DP, never on eDP. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (DISPLAY_VER(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* The sink reports the compressed bpp in 1/16 bpp units (>> 4). */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}

/*
 * Compute bpp, lane count and link rate for the pipe, honoring the limits
 * derived from source/sink capabilities and compliance requests; falls
 * back to DSC when the mode does not fit the available bandwidth.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	/* Clock limits are indices into intel_dp->common_rates[]. */
	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock
		 * and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/* Modes too fast or wider than 5120 pixels need the bigjoiner. */
	if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
	     adjusted_mode->crtc_hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp))
		pipe_config->bigjoiner = true;

	if (intel_dp_is_edp(intel_dp))
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we fall back to the max clock and lane count for eDP
		 * panels that fail with the fast optimal settings (see
		 * intel_dp->use_max_params), in which case the fast vs. wide
		 * choice doesn't matter.
		 */
		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
	else
		/* Optimize for slow and wide. */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}

/*
 * Decide whether the RGB output should use limited (16-235) quantization
 * range, based on the connector's broadcast_rgb property or, in AUTO mode,
 * the CEA default for the current mode.
 */
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

/* DP audio is unsupported on G4X and on port A before display version 12. */
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

/*
 * Fill the pixel format, colorimetry, bpc and dynamic range fields of a
 * VSC SDP from the crtc and connector state (DP 1.4a, Table 2-120).
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

/* Build the VSC SDP for pipes without PSR that need one. */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;

	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;
	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
					 &crtc_state->infoframes.vsc);
}

/*
 * Build the VSC SDP used by the PSR code; the revision/length depend on
 * PSR1 vs. PSR2 and on whether colorimetry is carried in the SDP.
 */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	vsc->sdp_type = DP_SDP_VSC;

	if (intel_dp->psr.psr2_enabled) {
		if (intel_dp->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

/*
 * Attach an HDR Dynamic Range and Mastering infoframe to the crtc state
 * when the connector carries HDR output metadata.
 */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/*
 * Enable seamless DRRS when supported and compatible with the rest of the
 * state, and precompute the M/N values for the downclocked mode.
 */
static void
intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
			     struct intel_crtc_state *pipe_config,
			     int output_bpp, bool constant_n)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pixel_clock;

	/* No DRRS when VRR is enabled. */
	if (pipe_config->vrr.enable)
		return;

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_dp_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return;

	if (!intel_connector->panel.downclock_mode ||
	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	pipe_config->has_drrs = true;

	pixel_clock = intel_connector->panel.downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	/* Precompute link M/N values for the downclocked refresh rate. */
	intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
			       constant_n, pipe_config->fec_enable);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
}

/*
 * Main DP .compute_config() hook: determine output format, panel fitting,
 * audio, link parameters (possibly with DSC), color range, MSO splitting,
 * M/N values, and the VRR/PSR/DRRS and SDP infoframe state.
 * Returns 0 on success or a negative error code on an unsupported mode.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Each MSO segment carries 1/n of the timings plus the overlap. */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	/* PSR must be computed before DRRS (see intel_dp_drrs_compute_config()). */
	intel_vrr_compute_config(pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}

/*
 * Record the new link parameters and mark the link as untrained so the
 * next enable performs link training with them.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_panel_enable_backlight(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}

/* Disable backlight PP control and backlight PWM.
 */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* Reverse order of intel_edp_backlight_on(): PP control first. */
	intel_pps_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}

static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/* Enable/disable decompression in the sink via the DP_DSC_ENABLE DPCD. */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enable ? "enable" : "disable");
}

/* Program the Intel source OUI into the eDP sink's DP_SOURCE_OUI DPCD. */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = { 0 };

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 when it must signal downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1) on success. */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		intel_dp_get_dpcd(intel_dp);

	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}

/*
 * Check whether the BIOS-programmed state can be taken over with a
 * fastset; returns false (forcing a full modeset) when it cannot.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	if (CAN_PSR(intel_dp)) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}

/* Cache the PCON's DSC encoder capability DPCD registers. */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
			     intel_dp->pcon_dsc_dpcd,
			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}

/* Return the highest FRL bandwidth (in Gbps) set in the bit mask, or 0. */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}
	return 0;
}

/* Inverse of intel_dp_pcon_get_frl_mask(): Gbps value -> DPCD BW mask. */
static int intel_dp_pcon_set_frl_mask(int max_frl)
{
	switch (max_frl) {
	case 48:
		return DP_PCON_FRL_BW_MASK_48GBPS;
	case 40:
		return DP_PCON_FRL_BW_MASK_40GBPS;
	case 32:
		return DP_PCON_FRL_BW_MASK_32GBPS;
	case 24:
		return DP_PCON_FRL_BW_MASK_24GBPS;
	case 18:
		return DP_PCON_FRL_BW_MASK_18GBPS;
	case 9:
		return DP_PCON_FRL_BW_MASK_9GBPS;
	}

	return 0;
}

/*
 * Max FRL rate (in Gbps) the HDMI sink supports per its EDID, clamped by
 * the sink's HDMI 2.1 DSC FRL limits when it advertises those.
 */
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int max_frl_rate;
	int max_lanes, rate_per_lane;
	int max_dsc_lanes, dsc_rate_per_lane;

	max_lanes = connector->display_info.hdmi.max_lanes;
	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
	max_frl_rate = max_lanes * rate_per_lane;

	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
		if (max_dsc_lanes && dsc_rate_per_lane)
			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
	}

	return max_frl_rate;
}

/*
 * Train the PCON's HDMI FRL link at the highest rate both the PCON and
 * the HDMI sink support. Returns 0 on success, -ETIMEDOUT/-EINVAL or a
 * negative AUX error otherwise.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
	if (ret < 0)
		return ret;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
	/* Train at the rate both ends support. */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Verify HDMI Link configuration shows FRL Mode */
	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
	    DP_PCON_HDMI_MODE_FRL) {
		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
		return -EINVAL;
	}
	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);

	/* Cache the trained rate so we skip retraining until it is lost. */
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/* True if the branch device drives an HDMI 2.1 (FRL-capable) sink. */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp->has_hdmi_sink &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Always go for FRL training if:
	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
	 * -sink is HDMI2.1
	 */
	if (!(intel_dp->dpcd[2] & DP_PCON_SOURCE_CTL_MODE) ||
	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		/* FRL failed: fall back to legacy TMDS on the HDMI side. */
		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}

/* DSC slice height for the PCON encoder, derived from the active vdisplay. */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
{
	int vactive = crtc_state->hw.adjusted_mode.vdisplay;

	return intel_hdmi_dsc_get_slice_height(vactive);
}

/*
 * DSC slice count for the PCON encoder, honouring both the PCON's and
 * the HDMI sink's slice/throughput limits.
 */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
					     pcon_max_slice_width,
					     hdmi_max_slices, hdmi_throughput);
}

/* DSC bits-per-pixel for the PCON encoder given the chosen slice layout. */
static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  int num_slices, int slice_width)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int output_format = crtc_state->output_format;
	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
	int hdmi_max_chunk_bytes =
		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
				      num_slices, output_format, hdmi_all_bpp,
				      hdmi_max_chunk_bytes);
}

/*
 * Compute DSC parameters (slice height/width/count, bpp) for a DP->HDMI2.1
 * PCON and push them to the PCON as PPS overrides. Bails out silently if
 * the sink is not HDMI 2.1 or either side lacks DSC 1.2 support.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Little-endian 16-bit fields: height, width, then 10-bit bpp. */
	pps_param[0] = slice_height & 0xFF;

	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	/* bpp is a 10-bit value; only the low 2 bits of the high byte are valid. */
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}

/*
 * Program a DP branch device (protocol converter): HDMI vs. DVI output
 * mode, YCbCr 4:4:4 -> 4:2:0 down-conversion and RGB -> YCbCr conversion.
 * Requires DPCD rev 1.3+ and a branch device; otherwise a no-op.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	/* drm_dp_dpcd_writeb() returns 1 on success (bytes written). */
	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
			    enableddisabled(intel_dp->has_hdmi_sink));

	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
			    enableddisabled(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;
	if (intel_dp->dfp.rgb_to_ycbcr) {
		bool bt2020, bt709;

		/*
		 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
		 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
		 *
		 */
		tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;

		/* Check which colorimetries the downstream port can convert. */
		bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								   intel_dp->downstream_ports,
								   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
		bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								  intel_dp->downstream_ports,
								  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
		switch (crtc_state->infoframes.vsc.colorimetry) {
		case DP_COLORIMETRY_BT2020_RGB:
		case DP_COLORIMETRY_BT2020_YCC:
			if (bt2020)
				tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
			break;
		case DP_COLORIMETRY_BT709_YCC:
		case DP_COLORIMETRY_XVYCC_709:
			if (bt709)
				tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
			break;
		default:
			/* Fall back to the BT601 default set above. */
			break;
		}
	}

	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
			    enableddisabled(tmp ? true : false));
}


/*
 * Whether the DPRX supports VSC SDP extension for colorimetry (needed to
 * signal wide gamut / YCbCr formats via VSC SDP). False on AUX read failure.
 */
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

/* Cache the sink's DSC capability and FEC capability DPCD registers. */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}

/*
 * Rewrite a per-segment eDP MSO mode into the full-panel mode: each of the
 * n links carries a (segment - overlap)-wide slice, so horizontal timings
 * and the pixel clock scale by the link count.
 */
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
				     struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int n = intel_dp->mso_link_count;
	int overlap = intel_dp->mso_pixel_overlap;

	/* Nothing to do without a mode or when MSO is not in use. */
	if (!mode || !n)
		return;

	mode->hdisplay = (mode->hdisplay - overlap) * n;
	mode->hsync_start = (mode->hsync_start - overlap) * n;
	mode->hsync_end = (mode->hsync_end - overlap) * n;
	mode->htotal = (mode->htotal - overlap) * n;
	mode->clock *= n;

	drm_mode_set_name(mode);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] using generated MSO mode: ",
		    connector->base.base.id, connector->base.name);
	drm_mode_debug_printmodeline(mode);
}

/* Read and validate the eDP Multi-SST Operation (MSO) link count cap. */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 mso;

	/* MSO is an eDP 1.4+ feature. */
	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
		/* Sink supports MSO, but this platform cannot drive it. */
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
}

/*
 * One-time eDP DPCD readout at output setup: base caps, descriptor, eDP
 * display control registers, sink link rates, DSC caps, source OUI and
 * MSO configuration. Returns false if the base DPCD read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		/*
		 * NOTE(review): read result is not checked here; a failed read
		 * leaves sink_rates uninitialized. The zero-terminator check
		 * below only helps if the buffer happens to contain zeros.
		 */
		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		/* The rate table is terminated by the first zero entry. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (DISPLAY_VER(dev_priv) >= 10)
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	intel_edp_mso_init(intel_dp);

	return true;
}

/* Whether the sink provides a usable DP_SINK_COUNT (needs a connector). */
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

/*
 * Full (non-eDP setup) DPCD readout used from detect/resume paths.
 * Returns false when the sink is not usable (read failure, or a dongle
 * with no display attached).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

/* MST is usable only when module param, port and sink all allow it. */
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

/* Decide SST vs. MST for this port and update the topology manager. */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

/* Read the ESI (event status indicator) vector; true only on a full read. */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	/* Wide-gamut / YCbCr colorspaces also require the VSC SDP. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Serialize @vsc into the raw SDP buffer @sdp (of @size bytes).
 * Returns the packed length, or -ENOSPC if @size is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	/* Component bit depth encoding per DP 1.4a Table 2-120. */
	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

/*
 * Pack an HDR static metadata (Dynamic Range and Mastering) infoframe
 * into a DP SDP. Returns the number of meaningful bytes written, or
 * -ENOSPC on buffer/size mismatches.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* First pack the CTA-861 infoframe, then re-wrap it as an SDP. */
	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
2803 */ 2804 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 2805 } 2806 2807 static void intel_write_dp_sdp(struct intel_encoder *encoder, 2808 const struct intel_crtc_state *crtc_state, 2809 unsigned int type) 2810 { 2811 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2812 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2813 struct dp_sdp sdp = {}; 2814 ssize_t len; 2815 2816 if ((crtc_state->infoframes.enable & 2817 intel_hdmi_infoframe_enable(type)) == 0) 2818 return; 2819 2820 switch (type) { 2821 case DP_SDP_VSC: 2822 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 2823 sizeof(sdp)); 2824 break; 2825 case HDMI_PACKET_TYPE_GAMUT_METADATA: 2826 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 2827 &sdp, sizeof(sdp)); 2828 break; 2829 default: 2830 MISSING_CASE(type); 2831 return; 2832 } 2833 2834 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2835 return; 2836 2837 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 2838 } 2839 2840 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 2841 const struct intel_crtc_state *crtc_state, 2842 struct drm_dp_vsc_sdp *vsc) 2843 { 2844 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2845 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2846 struct dp_sdp sdp = {}; 2847 ssize_t len; 2848 2849 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 2850 2851 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2852 return; 2853 2854 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 2855 &sdp, len); 2856 } 2857 2858 void intel_dp_set_infoframes(struct intel_encoder *encoder, 2859 bool enable, 2860 const struct intel_crtc_state *crtc_state, 2861 const struct drm_connector_state *conn_state) 2862 { 2863 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2864 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2865 i915_reg_t reg = 
HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 2866 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 2867 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 2868 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 2869 u32 val = intel_de_read(dev_priv, reg); 2870 2871 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 2872 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 2873 if (intel_psr_enabled(intel_dp)) 2874 val &= ~dip_enable; 2875 else 2876 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 2877 2878 if (!enable) { 2879 intel_de_write(dev_priv, reg, val); 2880 intel_de_posting_read(dev_priv, reg); 2881 return; 2882 } 2883 2884 intel_de_write(dev_priv, reg, val); 2885 intel_de_posting_read(dev_priv, reg); 2886 2887 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 2888 if (!intel_psr_enabled(intel_dp)) 2889 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 2890 2891 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 2892 } 2893 2894 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 2895 const void *buffer, size_t size) 2896 { 2897 const struct dp_sdp *sdp = buffer; 2898 2899 if (size < sizeof(struct dp_sdp)) 2900 return -EINVAL; 2901 2902 memset(vsc, 0, size); 2903 2904 if (sdp->sdp_header.HB0 != 0) 2905 return -EINVAL; 2906 2907 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 2908 return -EINVAL; 2909 2910 vsc->sdp_type = sdp->sdp_header.HB1; 2911 vsc->revision = sdp->sdp_header.HB2; 2912 vsc->length = sdp->sdp_header.HB3; 2913 2914 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 2915 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 2916 /* 2917 * - HB2 = 0x2, HB3 = 0x8 2918 * VSC SDP supporting 3D stereo + PSR 2919 * - HB2 = 0x4, HB3 = 0xe 2920 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 2921 * first scan line of the SU region (applies to eDP v1.4b 2922 * and higher). 
	 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 * Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* Inverse of the bit-depth encoding used when packing. */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate and parse a raw SDP buffer back into an HDR static metadata
 * (Dynamic Range and Mastering) infoframe for state readout.
 * Returns 0 on success, -EINVAL on any header/size mismatch.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b.
*/ 2989 if ((sdp->sdp_header.HB3 & 0x3) != 0) 2990 return -EINVAL; 2991 2992 /* INFOFRAME SDP Version Number */ 2993 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 2994 return -EINVAL; 2995 2996 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 2997 if (sdp->db[0] != 1) 2998 return -EINVAL; 2999 3000 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 3001 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 3002 return -EINVAL; 3003 3004 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 3005 HDMI_DRM_INFOFRAME_SIZE); 3006 3007 return ret; 3008 } 3009 3010 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 3011 struct intel_crtc_state *crtc_state, 3012 struct drm_dp_vsc_sdp *vsc) 3013 { 3014 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3015 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3016 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3017 unsigned int type = DP_SDP_VSC; 3018 struct dp_sdp sdp = {}; 3019 int ret; 3020 3021 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 3022 if (intel_psr_enabled(intel_dp)) 3023 return; 3024 3025 if ((crtc_state->infoframes.enable & 3026 intel_hdmi_infoframe_enable(type)) == 0) 3027 return; 3028 3029 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 3030 3031 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 3032 3033 if (ret) 3034 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 3035 } 3036 3037 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 3038 struct intel_crtc_state *crtc_state, 3039 struct hdmi_drm_infoframe *drm_infoframe) 3040 { 3041 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3042 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3043 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 3044 struct dp_sdp sdp = {}; 3045 int ret; 3046 3047 if ((crtc_state->infoframes.enable & 3048 intel_hdmi_infoframe_enable(type)) 
	    == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

/*
 * Read back an SDP of the given @type from the hardware into the
 * matching field of @crtc_state->infoframes. Only implemented for DDI
 * encoders; other packet types are flagged via MISSING_CASE().
 */
void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

/*
 * DP CTS link training autotest: read the requested lane count and link
 * rate from the sink's DPCD, validate them against what this source
 * supports, and stash them in intel_dp->compliance for the subsequent
 * test modeset. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

/*
 * DP CTS video pattern autotest: read the requested test pattern,
 * resolution and color format from the sink's DPCD and record them in
 * intel_dp->compliance. Only the color-ramp pattern with RGB/VESA range
 * at 6 or 8 bpc is accepted. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt
things */ 3179 intel_dp->compliance.test_active = true; 3180 3181 return DP_TEST_ACK; 3182 } 3183 3184 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 3185 { 3186 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3187 u8 test_result = DP_TEST_ACK; 3188 struct intel_connector *intel_connector = intel_dp->attached_connector; 3189 struct drm_connector *connector = &intel_connector->base; 3190 3191 if (intel_connector->detect_edid == NULL || 3192 connector->edid_corrupt || 3193 intel_dp->aux.i2c_defer_count > 6) { 3194 /* Check EDID read for NACKs, DEFERs and corruption 3195 * (DP CTS 1.2 Core r1.1) 3196 * 4.2.2.4 : Failed EDID read, I2C_NAK 3197 * 4.2.2.5 : Failed EDID read, I2C_DEFER 3198 * 4.2.2.6 : EDID corruption detected 3199 * Use failsafe mode for all cases 3200 */ 3201 if (intel_dp->aux.i2c_nack_count > 0 || 3202 intel_dp->aux.i2c_defer_count > 0) 3203 drm_dbg_kms(&i915->drm, 3204 "EDID read had %d NACKs, %d DEFERs\n", 3205 intel_dp->aux.i2c_nack_count, 3206 intel_dp->aux.i2c_defer_count); 3207 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 3208 } else { 3209 struct edid *block = intel_connector->detect_edid; 3210 3211 /* We have to write the checksum 3212 * of the last block read 3213 */ 3214 block += intel_connector->detect_edid->extensions; 3215 3216 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 3217 block->checksum) <= 0) 3218 drm_dbg_kms(&i915->drm, 3219 "Failed to write EDID checksum\n"); 3220 3221 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 3222 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 3223 } 3224 3225 /* Set test active flag here so userspace doesn't interrupt things */ 3226 intel_dp->compliance.test_active = true; 3227 3228 return test_result; 3229 } 3230 3231 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 3232 const struct intel_crtc_state *crtc_state) 3233 { 3234 struct drm_i915_private *dev_priv = 3235 
to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 3236 struct drm_dp_phy_test_params *data = 3237 &intel_dp->compliance.test_data.phytest; 3238 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3239 enum pipe pipe = crtc->pipe; 3240 u32 pattern_val; 3241 3242 switch (data->phy_pattern) { 3243 case DP_PHY_TEST_PATTERN_NONE: 3244 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 3245 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 3246 break; 3247 case DP_PHY_TEST_PATTERN_D10_2: 3248 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 3249 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3250 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 3251 break; 3252 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 3253 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 3254 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3255 DDI_DP_COMP_CTL_ENABLE | 3256 DDI_DP_COMP_CTL_SCRAMBLED_0); 3257 break; 3258 case DP_PHY_TEST_PATTERN_PRBS7: 3259 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 3260 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3261 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 3262 break; 3263 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 3264 /* 3265 * FIXME: Ideally pattern should come from DPCD 0x250. As 3266 * current firmware of DPR-100 could not set it, so hardcoding 3267 * now for complaince test. 3268 */ 3269 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 3270 pattern_val = 0x3e0f83e0; 3271 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 3272 pattern_val = 0x0f83e0f8; 3273 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 3274 pattern_val = 0x0000f83e; 3275 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 3276 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3277 DDI_DP_COMP_CTL_ENABLE | 3278 DDI_DP_COMP_CTL_CUSTOM80); 3279 break; 3280 case DP_PHY_TEST_PATTERN_CP2520: 3281 /* 3282 * FIXME: Ideally pattern should come from DPCD 0x24A. 
As 3283 * current firmware of DPR-100 could not set it, so hardcoding 3284 * now for complaince test. 3285 */ 3286 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 3287 pattern_val = 0xFB; 3288 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3289 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 3290 pattern_val); 3291 break; 3292 default: 3293 WARN(1, "Invalid Phy Test Pattern\n"); 3294 } 3295 } 3296 3297 static void 3298 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, 3299 const struct intel_crtc_state *crtc_state) 3300 { 3301 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3302 struct drm_device *dev = dig_port->base.base.dev; 3303 struct drm_i915_private *dev_priv = to_i915(dev); 3304 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3305 enum pipe pipe = crtc->pipe; 3306 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3307 3308 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3309 TRANS_DDI_FUNC_CTL(pipe)); 3310 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3311 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3312 3313 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 3314 TGL_TRANS_DDI_PORT_MASK); 3315 trans_conf_value &= ~PIPECONF_ENABLE; 3316 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 3317 3318 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3319 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3320 trans_ddi_func_ctl_value); 3321 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3322 } 3323 3324 static void 3325 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, 3326 const struct intel_crtc_state *crtc_state) 3327 { 3328 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3329 struct drm_device *dev = dig_port->base.base.dev; 3330 struct drm_i915_private *dev_priv = to_i915(dev); 3331 enum port port = dig_port->base.port; 3332 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3333 enum pipe 
pipe = crtc->pipe; 3334 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3335 3336 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3337 TRANS_DDI_FUNC_CTL(pipe)); 3338 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3339 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3340 3341 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 3342 TGL_TRANS_DDI_SELECT_PORT(port); 3343 trans_conf_value |= PIPECONF_ENABLE; 3344 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 3345 3346 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3347 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3348 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3349 trans_ddi_func_ctl_value); 3350 } 3351 3352 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 3353 const struct intel_crtc_state *crtc_state) 3354 { 3355 struct drm_dp_phy_test_params *data = 3356 &intel_dp->compliance.test_data.phytest; 3357 u8 link_status[DP_LINK_STATUS_SIZE]; 3358 3359 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 3360 link_status) < 0) { 3361 DRM_DEBUG_KMS("failed to get link status\n"); 3362 return; 3363 } 3364 3365 /* retrieve vswing & pre-emphasis setting */ 3366 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 3367 link_status); 3368 3369 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); 3370 3371 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 3372 3373 intel_dp_phy_pattern_update(intel_dp, crtc_state); 3374 3375 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); 3376 3377 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 3378 link_status[DP_DPCD_REV]); 3379 } 3380 3381 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 3382 { 3383 struct drm_dp_phy_test_params *data = 3384 &intel_dp->compliance.test_data.phytest; 3385 3386 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 3387 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 3388 return 
DP_TEST_NAK; 3389 } 3390 3391 /* Set test active flag here so userspace doesn't interrupt things */ 3392 intel_dp->compliance.test_active = true; 3393 3394 return DP_TEST_ACK; 3395 } 3396 3397 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 3398 { 3399 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3400 u8 response = DP_TEST_NAK; 3401 u8 request = 0; 3402 int status; 3403 3404 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 3405 if (status <= 0) { 3406 drm_dbg_kms(&i915->drm, 3407 "Could not read test request from sink\n"); 3408 goto update_status; 3409 } 3410 3411 switch (request) { 3412 case DP_TEST_LINK_TRAINING: 3413 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 3414 response = intel_dp_autotest_link_training(intel_dp); 3415 break; 3416 case DP_TEST_LINK_VIDEO_PATTERN: 3417 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 3418 response = intel_dp_autotest_video_pattern(intel_dp); 3419 break; 3420 case DP_TEST_LINK_EDID_READ: 3421 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 3422 response = intel_dp_autotest_edid(intel_dp); 3423 break; 3424 case DP_TEST_LINK_PHY_TEST_PATTERN: 3425 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 3426 response = intel_dp_autotest_phy_pattern(intel_dp); 3427 break; 3428 default: 3429 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 3430 request); 3431 break; 3432 } 3433 3434 if (response & DP_TEST_ACK) 3435 intel_dp->compliance.test_type = request; 3436 3437 update_status: 3438 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 3439 if (status <= 0) 3440 drm_dbg_kms(&i915->drm, 3441 "Could not write test response to sink\n"); 3442 } 3443 3444 static void 3445 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) 3446 { 3447 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); 3448 3449 if (esi[1] & DP_CP_IRQ) { 3450 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 3451 *handled = true; 3452 } 
}

/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Loop until the sink stops reporting handled interrupts. */
	for (;;) {
		/*
		 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
		 * pass in "esi+10" to drm_dp_channel_eq_ok(), which
		 * takes a 6-byte array. So we actually need 16 bytes
		 * here.
		 *
		 * Somebody who knows what the limits actually are
		 * should check this, but for now this is at least
		 * harmless and avoids a valid compiler warning about
		 * using more of the array than we have allocated.
		 */
		u8 esi[DP_DPRX_ESI_LEN+2] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		intel_dp_mst_hpd_irq(intel_dp, esi, &handled);

		if (!handled)
			break;

		/* Ack the serviced ESI bits; retry the write a few times. */
		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}

/*
 * Handle an HDMI_LINK_STATUS_CHANGED IRQ from a DP->HDMI PCON: if we had
 * trained FRL but the PCON reports the HDMI link down, disable the HDMI
 * link, dump the FRL error counters, and restart FRL training (falling
 * back to TMDS if needed).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &=  ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

/*
 * Decide whether the link needs retraining, based on the cached link
 * parameters and the DPCD link status. Returns false while untrained or
 * while PSR owns the link.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to user the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

/*
 * Return true if @conn_state's best_encoder belongs to this DP port,
 * either as the SST encoder or one of the per-pipe MST fake encoders.
 */
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Collect (in @crtc_mask) the active crtcs driven by this DP port that
 * need link retraining, taking the per-crtc modeset locks via @ctx.
 * Crtcs with a pending commit are skipped. The retrain need is
 * re-checked after locking; @crtc_mask is cleared if it went away.
 *
 * Returns 0 or a drm_modeset_lock error (-EDEADLK means the caller must
 * back off and retry).
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}

/* SST connector reported connected, or the port is in MST mode. */
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
	       intel_dp->is_mst;
}

/*
 * Retrain the link on all active crtcs of this encoder, suppressing FIFO
 * underrun reporting around the retrain. Early-returned modeset locks
 * are released by the caller's acquire ctx.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	/* Suppress underruns caused by re-training on every affected crtc. */
	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * Collect (in @crtc_mask) the active crtcs driven by this DP port for a
 * pending PHY compliance test, taking per-crtc modeset locks via @ctx.
 * Same locking structure as intel_dp_prep_link_retrain(), minus the
 * retrain-needed checks.
 *
 * Returns 0 or a drm_modeset_lock error (-EDEADLK means back off and
 * retry).
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Run a pending PHY compliance test on the (MST master) crtc of this
 * encoder, under the modeset locks acquired through @ctx.
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}

/*
 * Entry point for the deferred PHY compliance test work: run the test
 * under modeset locks, retrying on -EDEADLK via drm_modeset_backoff().
 */
void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}

/*
 * Read and ack DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD 1.1+), then dispatch
 * automated-test, content-protection and sink-specific IRQs.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the IRQs by writing the vector back. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * Read and ack DP_LINK_SERVICE_IRQ_VECTOR_ESI0 (DPCD 1.1+), then handle
 * a PCON HDMI link status change if flagged.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
		drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
		return;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
		drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
		return;
	}

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP has its own detect path (edp_detect()); never expected here. */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are always considered connected. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* live-status readout needs the display core power domain */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return a freshly allocated EDID for the attached connector, preferring
 * the cached (e.g. VBT/opregion provided) EDID over a live DDC read.
 * Returns NULL if the cached EDID is an error sentinel.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

/*
 * Cache the downstream facing port (DFP) capabilities - max bpc, dotclock,
 * TMDS clock range and PCON FRL bandwidth - from the branch device DPCD
 * and the (possibly NULL) EDID of the sink behind it.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Work out how YCbCr 4:2:0 output can be achieved (RGB->YCbCr in the PCON,
 * 4:4:4->4:2:0 conversion, or 4:2:0 passthrough) and update both the
 * intel_dp->dfp flags and the connector's ycbcr_420_allowed accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_IRONLAKE(i915))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);

	if (DISPLAY_VER(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
			/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 ||
				ycbcr_420_passthrough;
		}
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(intel_dp->dfp.rgb_to_ycbcr),
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * (Re)read the EDID, cache it in connector->detect_edid, and refresh all
 * state derived from it: DFP limits, 4:2:0 support, HDMI sink and audio
 * capability, plus the CEC address.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	/* drop any previously cached EDID and derived state first */
	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}

/*
 * Free the cached EDID and reset every piece of state that
 * intel_dp_set_edid() derived from it.
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}

/*
 * .detect_ctx hook: full connector detection - live status, DPCD, DSC
 * caps, MST configuration, link retraining and EDID readout. Called with
 * connection_mutex held; may return -EDEADLK for ctx backoff.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (DISPLAY_VER(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
4280 */ 4281 if (intel_dp->reset_link_params || intel_dp->is_mst) { 4282 /* Initial max link lane count */ 4283 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 4284 4285 /* Initial max link rate */ 4286 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 4287 4288 intel_dp->reset_link_params = false; 4289 } 4290 4291 intel_dp_print_rates(intel_dp); 4292 4293 if (intel_dp->is_mst) { 4294 /* 4295 * If we are in MST mode then this connector 4296 * won't appear connected or have anything 4297 * with EDID on it 4298 */ 4299 status = connector_status_disconnected; 4300 goto out; 4301 } 4302 4303 /* 4304 * Some external monitors do not signal loss of link synchronization 4305 * with an IRQ_HPD, so force a link status check. 4306 */ 4307 if (!intel_dp_is_edp(intel_dp)) { 4308 int ret; 4309 4310 ret = intel_dp_retrain_link(encoder, ctx); 4311 if (ret) 4312 return ret; 4313 } 4314 4315 /* 4316 * Clearing NACK and defer counts to get their exact values 4317 * while reading EDID which are required by Compliance tests 4318 * 4.2.2.4 and 4.2.2.5 4319 */ 4320 intel_dp->aux.i2c_nack_count = 0; 4321 intel_dp->aux.i2c_defer_count = 0; 4322 4323 intel_dp_set_edid(intel_dp); 4324 if (intel_dp_is_edp(intel_dp) || 4325 to_intel_connector(connector)->detect_edid) 4326 status = connector_status_connected; 4327 4328 intel_dp_check_device_service_irq(intel_dp); 4329 4330 out: 4331 if (status != connector_status_connected && !intel_dp->is_mst) 4332 intel_dp_unset_edid(intel_dp); 4333 4334 /* 4335 * Make sure the refs for power wells enabled during detect are 4336 * dropped to avoid a new detect cycle triggered by HPD polling. 
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/*
 * .force hook: refresh the cached EDID for a connector that userspace has
 * forced on. Only re-reads the EDID if the connector was already reported
 * connected, holding the AUX power domain for the DDC transfer.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}

/*
 * .get_modes hook: populate the probed mode list from the cached EDID,
 * the eDP fixed mode, or - failing both - a mode derived from the
 * downstream facing port's capabilities.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;
	int num_modes = 0;

	edid = intel_connector->detect_edid;
	if (edid) {
		num_modes = intel_connector_update_modes(connector, edid);

		if (intel_vrr_is_capable(connector))
			drm_connector_set_vrr_capable_property(connector,
							       true);
	}

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	if (num_modes)
		return num_modes;

	/* no EDID at all: fall back to a DFP-derived mode, if any */
	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}

/*
 * .late_register hook: register the AUX channel and CEC adapter once the
 * connector's sysfs device exists, and set up LSPCON when the VBT says one
 * is present.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_object_attach_property(&connector->base,
						   connector->dev->mode_config.hdr_output_metadata_property,
						   0);
	}

	return ret;
}

/*
 * .early_unregister hook: tear down CEC and the AUX channel in the reverse
 * order of intel_dp_connector_register().
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

/* Encoder teardown: clean up MST, sync VDD off and finish the AUX channel */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

/* Suspend: make sure panel VDD is off before the system goes down */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);
}

/* Shutdown: let the panel power cycle delay expire before power off */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Add every connector of the given tile group (and its planes) to the
 * atomic state and mark their CRTCs for a full modeset.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Pull every CRTC driving one of the given transcoders into the atomic
 * state and force a modeset on it (used for port sync / tiled setups).
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* clear handled bits so leftovers can be WARNed about below */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * For a connector whose old CRTC was part of a port sync group, add all
 * master/slave transcoders of that group to the atomic state.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/*
 * .atomic_check hook: digital connector checks plus tile-group and
 * port-sync bookkeeping on gen9+ when this connector needs a modeset.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/*
 * HPD IRQ handler for DP ports. Returns IRQ_NONE when the hotplug code
 * should follow up with a full (long HPD style) detection cycle.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		/* full detect: start link param fallback from scratch */
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (DISPLAY_VER(dev_priv) < 5)
		return false;

	/* pre-gen9 hardware has eDP hardwired to port A */
	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

/*
 * Attach all connector properties applicable to this DP/eDP connector:
 * subconnector, audio, broadcast RGB, max bpc, colorspace, HDR metadata,
 * eDP scaling modes and VRR capability.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (DISPLAY_VER(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_is_lspcon_present(dev_priv, port)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	/* requested rate matching the downclock mode means low RR */
	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
	    refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		/* gen8+ (except CHV): switch between the M1/N1 and M2/N2 sets */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (DISPLAY_VER(dev_priv) > 6) {
		/* gen7 / VLV / CHV: toggle the RR switch bit in PIPECONF */
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}

/* Caller must hold dev_priv->drrs.mutex */
static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
4868 * 4869 * Initializes frontbuffer_bits and drrs.dp 4870 */ 4871 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 4872 const struct intel_crtc_state *crtc_state) 4873 { 4874 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4875 4876 if (!crtc_state->has_drrs) 4877 return; 4878 4879 drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); 4880 4881 mutex_lock(&dev_priv->drrs.mutex); 4882 4883 if (dev_priv->drrs.dp) { 4884 drm_warn(&dev_priv->drm, "DRRS already enabled\n"); 4885 goto unlock; 4886 } 4887 4888 intel_edp_drrs_enable_locked(intel_dp); 4889 4890 unlock: 4891 mutex_unlock(&dev_priv->drrs.mutex); 4892 } 4893 4894 static void 4895 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, 4896 const struct intel_crtc_state *crtc_state) 4897 { 4898 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4899 4900 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { 4901 int refresh; 4902 4903 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); 4904 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); 4905 } 4906 4907 dev_priv->drrs.dp = NULL; 4908 } 4909 4910 /** 4911 * intel_edp_drrs_disable - Disable DRRS 4912 * @intel_dp: DP struct 4913 * @old_crtc_state: Pointer to old crtc_state. 
4914 * 4915 */ 4916 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 4917 const struct intel_crtc_state *old_crtc_state) 4918 { 4919 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4920 4921 if (!old_crtc_state->has_drrs) 4922 return; 4923 4924 mutex_lock(&dev_priv->drrs.mutex); 4925 if (!dev_priv->drrs.dp) { 4926 mutex_unlock(&dev_priv->drrs.mutex); 4927 return; 4928 } 4929 4930 intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); 4931 mutex_unlock(&dev_priv->drrs.mutex); 4932 4933 cancel_delayed_work_sync(&dev_priv->drrs.work); 4934 } 4935 4936 /** 4937 * intel_edp_drrs_update - Update DRRS state 4938 * @intel_dp: Intel DP 4939 * @crtc_state: new CRTC state 4940 * 4941 * This function will update DRRS states, disabling or enabling DRRS when 4942 * executing fastsets. For full modeset, intel_edp_drrs_disable() and 4943 * intel_edp_drrs_enable() should be called instead. 4944 */ 4945 void 4946 intel_edp_drrs_update(struct intel_dp *intel_dp, 4947 const struct intel_crtc_state *crtc_state) 4948 { 4949 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4950 4951 if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) 4952 return; 4953 4954 mutex_lock(&dev_priv->drrs.mutex); 4955 4956 /* New state matches current one? */ 4957 if (crtc_state->has_drrs == !!dev_priv->drrs.dp) 4958 goto unlock; 4959 4960 if (crtc_state->has_drrs) 4961 intel_edp_drrs_enable_locked(intel_dp); 4962 else 4963 intel_edp_drrs_disable_locked(intel_dp, crtc_state); 4964 4965 unlock: 4966 mutex_unlock(&dev_priv->drrs.mutex); 4967 } 4968 4969 static void intel_edp_drrs_downclock_work(struct work_struct *work) 4970 { 4971 struct drm_i915_private *dev_priv = 4972 container_of(work, typeof(*dev_priv), drrs.work.work); 4973 struct intel_dp *intel_dp; 4974 4975 mutex_lock(&dev_priv->drrs.mutex); 4976 4977 intel_dp = dev_priv->drrs.dp; 4978 4979 if (!intel_dp) 4980 goto unlock; 4981 4982 /* 4983 * The delayed work can race with an invalidate hence we need to 4984 * recheck. 
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes start.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* pending downclock is stale now that the screen is busy */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* only track bits belonging to the DRRS pipe */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario.
This feature is applicable 5104 * for internal panels. 5105 * 5106 * Indication that the panel supports DRRS is given by the panel EDID, which 5107 * would list multiple refresh rates for one resolution. 5108 * 5109 * DRRS is of 2 types - static and seamless. 5110 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 5111 * (may appear as a blink on screen) and is used in dock-undock scenario. 5112 * Seamless DRRS involves changing RR without any visual effect to the user 5113 * and can be used during normal system usage. This is done by programming 5114 * certain registers. 5115 * 5116 * Support for static/seamless DRRS may be indicated in the VBT based on 5117 * inputs from the panel spec. 5118 * 5119 * DRRS saves power by switching to low RR based on usage scenarios. 5120 * 5121 * The implementation is based on frontbuffer tracking implementation. When 5122 * there is a disturbance on the screen triggered by user activity or a periodic 5123 * system activity, DRRS is disabled (RR is changed to high RR). When there is 5124 * no movement on screen, after a timeout of 1 second, a switch to low RR is 5125 * made. 5126 * 5127 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 5128 * and intel_edp_drrs_flush() are called. 5129 * 5130 * DRRS can be further extended to support other internal panels and also 5131 * the scenario of video playback wherein RR is set based on the rate 5132 * requested by userspace. 5133 */ 5134 5135 /** 5136 * intel_dp_drrs_init - Init basic DRRS work and mutex. 5137 * @connector: eDP connector 5138 * @fixed_mode: preferred mode of panel 5139 * 5140 * This function is called only once at driver load to initialize basic 5141 * DRRS stuff. 5142 * 5143 * Returns: 5144 * Downclock mode if panel supports it, else return NULL. 5145 * DRRS support is determined by the presence of downclock mode (apart 5146 * from VBT setting). 
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	/* DRRS needs Gen7+ hardware; bail out on anything older. */
	if (DISPLAY_VER(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	/* The EDID must advertise a second, lower refresh rate mode. */
	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Panels come up at the high (fixed-mode) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

/*
 * eDP-specific connector setup: power sequencer, cached DPCD/EDID, fixed and
 * downclock panel modes, and backlight. Returns false when the connector
 * should not be registered (LVDS conflict or a "ghost" eDP with no DPCD);
 * returns true immediately for non-eDP connectors.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_pps_init(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
		} else {
			/* EDID present but yielded no modes: cache an error. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* multiply the mode clock and horizontal timings for MSO */
	intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
	intel_edp_mso_mode_fixup(intel_connector, downclock_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_pps_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}

/*
 * Deferred handler for link-training failures: mark the connector's link
 * status BAD and send a hotplug uevent so userspace reprobes and retries
 * with a (possibly reduced) mode.
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

/*
 * Top-level DP/eDP connector initialization for a digital port: determines
 * the connector type, registers the DRM connector, sets up AUX, MST and
 * (for eDP) the panel; returns false on any failure, cleaning up what was
 * already initialized.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* Undo the AUX and MST setup done above before bailing. */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);
		/* HDCP is optional: log and continue on failure. */
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

/* Suspend the MST topology manager on every MST-capable DDI encoder. */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/*
 * Resume the MST topology manager on every MST-capable DDI encoder,
 * dropping back to non-MST mode for any topology that fails to resume.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			/* Topology gone or changed: tear down MST state. */
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}