1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "g4x_dp.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpll.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"

/* DPRX Event Status Indicator read length, in bytes */
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)


/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe
configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

/* Forward declaration: defined later in this file. */
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/*
 * Update sink rates from dpcd: fill intel_dp->sink_rates[] from the sink's
 * DPCD max-link-rate capability, further clamped by the LTTPR max rate when
 * LTTPRs are present.
 */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	/* Copy standard rates up to and including the sink/LTTPR cap. */
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate.
*/
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		/* rates[] is ascending, so scan from the top down. */
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

/*
 * Current max lane count limit; may be lower than the common max if link
 * training fallback has reduced it (see
 * intel_dp_get_link_train_fallback_values()).
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps.
Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

/* True when the platform/port combination can drive a big joiner config. */
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	return DISPLAY_VER(dev_priv) >= 12 ||
		(IS_DISPLAY_VER(dev_priv, 11) &&
		 encoder->port != PORT_A);
}

/* Max source link rate for CNL, depending on SKU voltage and port. */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

/* Max source link rate for ICL: combo PHY non-eDP ports top out at 5.4G. */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

/* Max source link rate for JSL/EHL: eDP is limited to 5.4G. */
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

/*
 * Select the platform's source link-rate table (and its platform-specific
 * max-rate clamp), then store it in intel_dp->source_rates.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The
values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	/* Pick the rate table for the platform, newest platforms first. */
	if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_DISPLAY_VER(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* The VBT may further lower the platform's max rate. */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates
 = source_rates;
	intel_dp->num_source_rates = size;
}

/*
 * Merge-intersect two rate arrays (both assumed sorted ascending, see the
 * rate tables above) into common_rates; returns the number of common rates.
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

/* Recompute common_rates as the intersection of source and sink rates. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

/* Validate a (rate, lane count) pair against the current fallback limits. */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

/*
 * Check whether the eDP panel's fixed mode still fits into the reduced
 * link configuration (using the 18 bpp minimum for the bandwidth check).
 */
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

/*
 * Compute fallback link parameters after a failed link training: first try
 * the next lower rate, then halve the lane count. Returns 0 when a retry is
 * possible, -1 when no further fallback exists.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Fall back to the next lower common rate. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Lowest rate already; halve the lane count instead. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

/* Scale the mode clock to account for FEC overhead (factor = 1/0.972261). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

/* Small joiner RAM size in bits, per display version. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

/*
 * Compute the max compressed output bpp (in U6.4 fixed point) supported for
 * this mode, limited by link bandwidth and joiner RAM; 0 means unsupported.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz.
width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;

	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

/*
 * Pick the smallest valid DSC slice count that satisfies both the encoder
 * throughput limit and the sink's max slice width; 0 means unsupported.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

/*
 * Pick the output format for the mode: RGB unless the mode is 4:2:0-only,
 * in which case prefer a DFP conversion path (RGB or YCbCr 4:4:4 input)
 * over native 4:2:0 output.
 */
static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.rgb_to_ycbcr &&
	    intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

/* Minimum pipe bpp for the output format: 6 bpc for RGB, 8 bpc otherwise. */
int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
	 * format of the number of bytes per pixel will be half the number
	 * of bytes of RGB pixel.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Minimum output bpp for the mode, accounting for the chosen output format. */
static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

/* Validate the mode against the downstream facing port (DFP) limits. */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps */
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/*
Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/* Connector ->mode_valid() hook: validate a mode against link/DSC limits. */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	/* eDP panels only support their fixed mode's resolution. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay != fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay != fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Big joiner doubles the available dotclock when usable. */
	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate =
 intel_dp_link_required(target_clock,
				    intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if (DISPLAY_VER(dev_priv) >= 10 &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/* big joiner configuration needs DSC */
	if (bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}

/* Source supports HBR2 (5.4 GHz) when its top rate reaches 540000 kHz. */
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

/* Source supports HBR3 (8.1 GHz) when its top rate reaches 810000 kHz. */
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

/*
 * Format an int array as a comma-separated list into str; silently stops
 * when the buffer would be truncated.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int
 r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

/* Dump source/sink/common rate tables to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

/*
 * Highest common rate not exceeding the current max_link_rate limit;
 * falls back to RBR (162000) if the limit excludes everything.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

/* Index of @rate in the sink rate table (for the eDP 1.4 rate-select DPCD). */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

/*
 * Translate a port clock into the link_bw / rate_select pair to program,
 * using whichever selection method the sink supports.
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method.
 */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		/* Classic link-bw code method. */
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

/* Whether the source hardware can drive FEC on this pipe configuration. */
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (DISPLAY_VER(dev_priv) >= 12)
		return true;

	if (IS_DISPLAY_VER(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

/* FEC is usable only when both source and sink support it. */
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

/* DSC is usable when source and sink support it (DP SST also needs FEC). */
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

/* True when the DFP will carry YCbCr 4:2:0 on its HDMI/TMDS side. */
static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

/* TMDS clock for the DFP's HDMI side at the given bpc (halved for 4:2:0). */
static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

/* Check the DFP's TMDS clock against its advertised min/max limits. */
static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int
 tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

/* Deep color (> 8 bpc) feasibility on the DFP's HDMI side at @bpc. */
static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{

	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}

/*
 * Max pipe bpp for this link: starts from the pipe's bpp, clamped by DFP
 * bpc/TMDS limits and, for eDP, by a VBT-provided panel bpp.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		/* Step bpc down until the DFP's TMDS clock limits are met. */
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests.
 */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock =
limits->min_clock; clock <= limits->max_clock; clock++) { 1077 for (lane_count = limits->min_lane_count; 1078 lane_count <= limits->max_lane_count; 1079 lane_count <<= 1) { 1080 link_clock = intel_dp->common_rates[clock]; 1081 link_avail = intel_dp_max_data_rate(link_clock, 1082 lane_count); 1083 1084 if (mode_rate <= link_avail) { 1085 pipe_config->lane_count = lane_count; 1086 pipe_config->pipe_bpp = bpp; 1087 pipe_config->port_clock = link_clock; 1088 1089 return 0; 1090 } 1091 } 1092 } 1093 } 1094 1095 return -EINVAL; 1096 } 1097 1098 /* Optimize link config in order: max bpp, min lanes, min clock */ 1099 static int 1100 intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, 1101 struct intel_crtc_state *pipe_config, 1102 const struct link_config_limits *limits) 1103 { 1104 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1105 int bpp, clock, lane_count; 1106 int mode_rate, link_clock, link_avail; 1107 1108 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 1109 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1110 1111 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 1112 output_bpp); 1113 1114 for (lane_count = limits->min_lane_count; 1115 lane_count <= limits->max_lane_count; 1116 lane_count <<= 1) { 1117 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 1118 link_clock = intel_dp->common_rates[clock]; 1119 link_avail = intel_dp_max_data_rate(link_clock, 1120 lane_count); 1121 1122 if (mode_rate <= link_avail) { 1123 pipe_config->lane_count = lane_count; 1124 pipe_config->pipe_bpp = bpp; 1125 pipe_config->port_clock = link_clock; 1126 1127 return 0; 1128 } 1129 } 1130 } 1131 } 1132 1133 return -EINVAL; 1134 } 1135 1136 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 1137 { 1138 int i, num_bpc; 1139 u8 dsc_bpc[3] = {0}; 1140 1141 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 1142 dsc_bpc); 1143 for (i 
= 0; i < num_bpc; i++) { 1144 if (dsc_max_bpc >= dsc_bpc[i]) 1145 return dsc_bpc[i] * 3; 1146 } 1147 1148 return 0; 1149 } 1150 1151 #define DSC_SUPPORTED_VERSION_MIN 1 1152 1153 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 1154 struct intel_crtc_state *crtc_state) 1155 { 1156 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1157 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1158 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1159 u8 line_buf_depth; 1160 int ret; 1161 1162 /* 1163 * RC_MODEL_SIZE is currently a constant across all configurations. 1164 * 1165 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1166 * DP_DSC_RC_BUF_SIZE for this. 1167 */ 1168 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1169 1170 ret = intel_dsc_compute_params(encoder, crtc_state); 1171 if (ret) 1172 return ret; 1173 1174 /* 1175 * Slice Height of 8 works for all currently available panels. So start 1176 * with that if pic_height is an integral multiple of 8. Eventually add 1177 * logic to try multiple slice heights. 
1178 */ 1179 if (vdsc_cfg->pic_height % 8 == 0) 1180 vdsc_cfg->slice_height = 8; 1181 else if (vdsc_cfg->pic_height % 4 == 0) 1182 vdsc_cfg->slice_height = 4; 1183 else 1184 vdsc_cfg->slice_height = 2; 1185 1186 vdsc_cfg->dsc_version_major = 1187 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1188 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1189 vdsc_cfg->dsc_version_minor = 1190 min(DSC_SUPPORTED_VERSION_MIN, 1191 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1192 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 1193 1194 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1195 DP_DSC_RGB; 1196 1197 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 1198 if (!line_buf_depth) { 1199 drm_dbg_kms(&i915->drm, 1200 "DSC Sink Line Buffer Depth invalid\n"); 1201 return -EINVAL; 1202 } 1203 1204 if (vdsc_cfg->dsc_version_minor == 2) 1205 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 1206 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 1207 else 1208 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 
1209 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 1210 1211 vdsc_cfg->block_pred_enable = 1212 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1213 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1214 1215 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1216 } 1217 1218 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 1219 struct intel_crtc_state *pipe_config, 1220 struct drm_connector_state *conn_state, 1221 struct link_config_limits *limits) 1222 { 1223 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1224 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 1225 const struct drm_display_mode *adjusted_mode = 1226 &pipe_config->hw.adjusted_mode; 1227 u8 dsc_max_bpc; 1228 int pipe_bpp; 1229 int ret; 1230 1231 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 1232 intel_dp_supports_fec(intel_dp, pipe_config); 1233 1234 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 1235 return -EINVAL; 1236 1237 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1238 if (DISPLAY_VER(dev_priv) >= 12) 1239 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 1240 else 1241 dsc_max_bpc = min_t(u8, 10, 1242 conn_state->max_requested_bpc); 1243 1244 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 1245 1246 /* Min Input BPC for ICL+ is 8 */ 1247 if (pipe_bpp < 8 * 3) { 1248 drm_dbg_kms(&dev_priv->drm, 1249 "No DSC support for less than 8bpc\n"); 1250 return -EINVAL; 1251 } 1252 1253 /* 1254 * For now enable DSC for max bpp, max link rate, max lane count. 1255 * Optimize this later for the minimum possible link rate/lane count 1256 * with DSC enabled for the requested mode. 
1257 */ 1258 pipe_config->pipe_bpp = pipe_bpp; 1259 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 1260 pipe_config->lane_count = limits->max_lane_count; 1261 1262 if (intel_dp_is_edp(intel_dp)) { 1263 pipe_config->dsc.compressed_bpp = 1264 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 1265 pipe_config->pipe_bpp); 1266 pipe_config->dsc.slice_count = 1267 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 1268 true); 1269 } else { 1270 u16 dsc_max_output_bpp; 1271 u8 dsc_dp_slice_count; 1272 1273 dsc_max_output_bpp = 1274 intel_dp_dsc_get_output_bpp(dev_priv, 1275 pipe_config->port_clock, 1276 pipe_config->lane_count, 1277 adjusted_mode->crtc_clock, 1278 adjusted_mode->crtc_hdisplay, 1279 pipe_config->bigjoiner); 1280 dsc_dp_slice_count = 1281 intel_dp_dsc_get_slice_count(intel_dp, 1282 adjusted_mode->crtc_clock, 1283 adjusted_mode->crtc_hdisplay, 1284 pipe_config->bigjoiner); 1285 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 1286 drm_dbg_kms(&dev_priv->drm, 1287 "Compressed BPP/Slice Count not supported\n"); 1288 return -EINVAL; 1289 } 1290 pipe_config->dsc.compressed_bpp = min_t(u16, 1291 dsc_max_output_bpp >> 4, 1292 pipe_config->pipe_bpp); 1293 pipe_config->dsc.slice_count = dsc_dp_slice_count; 1294 } 1295 /* 1296 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 1297 * is greater than the maximum Cdclock and if slice count is even 1298 * then we need to use 2 VDSC instances. 
1299 */ 1300 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq || 1301 pipe_config->bigjoiner) { 1302 if (pipe_config->dsc.slice_count < 2) { 1303 drm_dbg_kms(&dev_priv->drm, 1304 "Cannot split stream to use 2 VDSC instances\n"); 1305 return -EINVAL; 1306 } 1307 1308 pipe_config->dsc.dsc_split = true; 1309 } 1310 1311 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 1312 if (ret < 0) { 1313 drm_dbg_kms(&dev_priv->drm, 1314 "Cannot compute valid DSC parameters for Input Bpp = %d " 1315 "Compressed BPP = %d\n", 1316 pipe_config->pipe_bpp, 1317 pipe_config->dsc.compressed_bpp); 1318 return ret; 1319 } 1320 1321 pipe_config->dsc.compression_enable = true; 1322 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 1323 "Compressed Bpp = %d Slice Count = %d\n", 1324 pipe_config->pipe_bpp, 1325 pipe_config->dsc.compressed_bpp, 1326 pipe_config->dsc.slice_count); 1327 1328 return 0; 1329 } 1330 1331 static int 1332 intel_dp_compute_link_config(struct intel_encoder *encoder, 1333 struct intel_crtc_state *pipe_config, 1334 struct drm_connector_state *conn_state) 1335 { 1336 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1337 const struct drm_display_mode *adjusted_mode = 1338 &pipe_config->hw.adjusted_mode; 1339 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1340 struct link_config_limits limits; 1341 int common_len; 1342 int ret; 1343 1344 common_len = intel_dp_common_len_rate_limit(intel_dp, 1345 intel_dp->max_link_rate); 1346 1347 /* No common link rates between source and sink */ 1348 drm_WARN_ON(encoder->base.dev, common_len <= 0); 1349 1350 limits.min_clock = 0; 1351 limits.max_clock = common_len - 1; 1352 1353 limits.min_lane_count = 1; 1354 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 1355 1356 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); 1357 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); 1358 1359 if (intel_dp->use_max_params) { 1360 /* 1361 * Use the maximum clock 
and number of lanes the eDP panel 1362 * advertizes being capable of in case the initial fast 1363 * optimal params failed us. The panels are generally 1364 * designed to support only a single clock and lane 1365 * configuration, and typically on older panels these 1366 * values correspond to the native resolution of the panel. 1367 */ 1368 limits.min_lane_count = limits.max_lane_count; 1369 limits.min_clock = limits.max_clock; 1370 } 1371 1372 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 1373 1374 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 1375 "max rate %d max bpp %d pixel clock %iKHz\n", 1376 limits.max_lane_count, 1377 intel_dp->common_rates[limits.max_clock], 1378 limits.max_bpp, adjusted_mode->crtc_clock); 1379 1380 if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq || 1381 adjusted_mode->crtc_hdisplay > 5120) && 1382 intel_dp_can_bigjoiner(intel_dp)) 1383 pipe_config->bigjoiner = true; 1384 1385 if (intel_dp_is_edp(intel_dp)) 1386 /* 1387 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 1388 * section A.1: "It is recommended that the minimum number of 1389 * lanes be used, using the minimum link rate allowed for that 1390 * lane configuration." 1391 * 1392 * Note that we fall back to the max clock and lane count for eDP 1393 * panels that fail with the fast optimal settings (see 1394 * intel_dp->use_max_params), in which case the fast vs. wide 1395 * choice doesn't matter. 1396 */ 1397 ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits); 1398 else 1399 /* Optimize for slow and wide. 
*/ 1400 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 1401 1402 /* enable compression if the mode doesn't fit available BW */ 1403 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 1404 if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) { 1405 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 1406 conn_state, &limits); 1407 if (ret < 0) 1408 return ret; 1409 } 1410 1411 if (pipe_config->dsc.compression_enable) { 1412 drm_dbg_kms(&i915->drm, 1413 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 1414 pipe_config->lane_count, pipe_config->port_clock, 1415 pipe_config->pipe_bpp, 1416 pipe_config->dsc.compressed_bpp); 1417 1418 drm_dbg_kms(&i915->drm, 1419 "DP link rate required %i available %i\n", 1420 intel_dp_link_required(adjusted_mode->crtc_clock, 1421 pipe_config->dsc.compressed_bpp), 1422 intel_dp_max_data_rate(pipe_config->port_clock, 1423 pipe_config->lane_count)); 1424 } else { 1425 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 1426 pipe_config->lane_count, pipe_config->port_clock, 1427 pipe_config->pipe_bpp); 1428 1429 drm_dbg_kms(&i915->drm, 1430 "DP link rate required %i available %i\n", 1431 intel_dp_link_required(adjusted_mode->crtc_clock, 1432 pipe_config->pipe_bpp), 1433 intel_dp_max_data_rate(pipe_config->port_clock, 1434 pipe_config->lane_count)); 1435 } 1436 return 0; 1437 } 1438 1439 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 1440 const struct drm_connector_state *conn_state) 1441 { 1442 const struct intel_digital_connector_state *intel_conn_state = 1443 to_intel_digital_connector_state(conn_state); 1444 const struct drm_display_mode *adjusted_mode = 1445 &crtc_state->hw.adjusted_mode; 1446 1447 /* 1448 * Our YCbCr output is always limited range. 
1449 * crtc_state->limited_color_range only applies to RGB, 1450 * and it must never be set for YCbCr or we risk setting 1451 * some conflicting bits in PIPECONF which will mess up 1452 * the colors on the monitor. 1453 */ 1454 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 1455 return false; 1456 1457 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1458 /* 1459 * See: 1460 * CEA-861-E - 5.1 Default Encoding Parameters 1461 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1462 */ 1463 return crtc_state->pipe_bpp != 18 && 1464 drm_default_rgb_quant_range(adjusted_mode) == 1465 HDMI_QUANTIZATION_RANGE_LIMITED; 1466 } else { 1467 return intel_conn_state->broadcast_rgb == 1468 INTEL_BROADCAST_RGB_LIMITED; 1469 } 1470 } 1471 1472 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 1473 enum port port) 1474 { 1475 if (IS_G4X(dev_priv)) 1476 return false; 1477 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) 1478 return false; 1479 1480 return true; 1481 } 1482 1483 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 1484 const struct drm_connector_state *conn_state, 1485 struct drm_dp_vsc_sdp *vsc) 1486 { 1487 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1488 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1489 1490 /* 1491 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1492 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 1493 * Colorimetry Format indication. 
1494 */ 1495 vsc->revision = 0x5; 1496 vsc->length = 0x13; 1497 1498 /* DP 1.4a spec, Table 2-120 */ 1499 switch (crtc_state->output_format) { 1500 case INTEL_OUTPUT_FORMAT_YCBCR444: 1501 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 1502 break; 1503 case INTEL_OUTPUT_FORMAT_YCBCR420: 1504 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 1505 break; 1506 case INTEL_OUTPUT_FORMAT_RGB: 1507 default: 1508 vsc->pixelformat = DP_PIXELFORMAT_RGB; 1509 } 1510 1511 switch (conn_state->colorspace) { 1512 case DRM_MODE_COLORIMETRY_BT709_YCC: 1513 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1514 break; 1515 case DRM_MODE_COLORIMETRY_XVYCC_601: 1516 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 1517 break; 1518 case DRM_MODE_COLORIMETRY_XVYCC_709: 1519 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 1520 break; 1521 case DRM_MODE_COLORIMETRY_SYCC_601: 1522 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 1523 break; 1524 case DRM_MODE_COLORIMETRY_OPYCC_601: 1525 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 1526 break; 1527 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 1528 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 1529 break; 1530 case DRM_MODE_COLORIMETRY_BT2020_RGB: 1531 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 1532 break; 1533 case DRM_MODE_COLORIMETRY_BT2020_YCC: 1534 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 1535 break; 1536 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 1537 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 1538 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 1539 break; 1540 default: 1541 /* 1542 * RGB->YCBCR color conversion uses the BT.709 1543 * color space. 
1544 */ 1545 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1546 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1547 else 1548 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 1549 break; 1550 } 1551 1552 vsc->bpc = crtc_state->pipe_bpp / 3; 1553 1554 /* only RGB pixelformat supports 6 bpc */ 1555 drm_WARN_ON(&dev_priv->drm, 1556 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 1557 1558 /* all YCbCr are always limited range */ 1559 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 1560 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 1561 } 1562 1563 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 1564 struct intel_crtc_state *crtc_state, 1565 const struct drm_connector_state *conn_state) 1566 { 1567 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 1568 1569 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 1570 if (crtc_state->has_psr) 1571 return; 1572 1573 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 1574 return; 1575 1576 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 1577 vsc->sdp_type = DP_SDP_VSC; 1578 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1579 &crtc_state->infoframes.vsc); 1580 } 1581 1582 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 1583 const struct intel_crtc_state *crtc_state, 1584 const struct drm_connector_state *conn_state, 1585 struct drm_dp_vsc_sdp *vsc) 1586 { 1587 vsc->sdp_type = DP_SDP_VSC; 1588 1589 if (intel_dp->psr.psr2_enabled) { 1590 if (intel_dp->psr.colorimetry_support && 1591 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 1592 /* [PSR2, +Colorimetry] */ 1593 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1594 vsc); 1595 } else { 1596 /* 1597 * [PSR2, -Colorimetry] 1598 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 1599 * 3D stereo + PSR/PSR2 + Y-coordinate. 
1600 */ 1601 vsc->revision = 0x4; 1602 vsc->length = 0xe; 1603 } 1604 } else { 1605 /* 1606 * [PSR1] 1607 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1608 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 1609 * higher). 1610 */ 1611 vsc->revision = 0x2; 1612 vsc->length = 0x8; 1613 } 1614 } 1615 1616 static void 1617 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 1618 struct intel_crtc_state *crtc_state, 1619 const struct drm_connector_state *conn_state) 1620 { 1621 int ret; 1622 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1623 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 1624 1625 if (!conn_state->hdr_output_metadata) 1626 return; 1627 1628 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 1629 1630 if (ret) { 1631 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 1632 return; 1633 } 1634 1635 crtc_state->infoframes.enable |= 1636 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 1637 } 1638 1639 static void 1640 intel_dp_drrs_compute_config(struct intel_dp *intel_dp, 1641 struct intel_crtc_state *pipe_config, 1642 int output_bpp, bool constant_n) 1643 { 1644 struct intel_connector *intel_connector = intel_dp->attached_connector; 1645 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1646 int pixel_clock; 1647 1648 if (pipe_config->vrr.enable) 1649 return; 1650 1651 /* 1652 * DRRS and PSR can't be enable together, so giving preference to PSR 1653 * as it allows more power-savings by complete shutting down display, 1654 * so to guarantee this, intel_dp_drrs_compute_config() must be called 1655 * after intel_psr_compute_config(). 
1656 */ 1657 if (pipe_config->has_psr) 1658 return; 1659 1660 if (!intel_connector->panel.downclock_mode || 1661 dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) 1662 return; 1663 1664 pipe_config->has_drrs = true; 1665 1666 pixel_clock = intel_connector->panel.downclock_mode->clock; 1667 if (pipe_config->splitter.enable) 1668 pixel_clock /= pipe_config->splitter.link_count; 1669 1670 intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, 1671 pipe_config->port_clock, &pipe_config->dp_m2_n2, 1672 constant_n, pipe_config->fec_enable); 1673 1674 /* FIXME: abstract this better */ 1675 if (pipe_config->splitter.enable) 1676 pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; 1677 } 1678 1679 int 1680 intel_dp_compute_config(struct intel_encoder *encoder, 1681 struct intel_crtc_state *pipe_config, 1682 struct drm_connector_state *conn_state) 1683 { 1684 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1685 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1686 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1687 enum port port = encoder->port; 1688 struct intel_connector *intel_connector = intel_dp->attached_connector; 1689 struct intel_digital_connector_state *intel_conn_state = 1690 to_intel_digital_connector_state(conn_state); 1691 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); 1692 int ret = 0, output_bpp; 1693 1694 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 1695 pipe_config->has_pch_encoder = true; 1696 1697 pipe_config->output_format = intel_dp_output_format(&intel_connector->base, 1698 adjusted_mode); 1699 1700 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 1701 ret = intel_pch_panel_fitting(pipe_config, conn_state); 1702 if (ret) 1703 return ret; 1704 } 1705 1706 if (!intel_dp_port_has_audio(dev_priv, port)) 1707 pipe_config->has_audio = false; 1708 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 1709 
pipe_config->has_audio = intel_dp->has_audio; 1710 else 1711 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 1712 1713 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1714 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 1715 adjusted_mode); 1716 1717 if (HAS_GMCH(dev_priv)) 1718 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 1719 else 1720 ret = intel_pch_panel_fitting(pipe_config, conn_state); 1721 if (ret) 1722 return ret; 1723 } 1724 1725 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1726 return -EINVAL; 1727 1728 if (HAS_GMCH(dev_priv) && 1729 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 1730 return -EINVAL; 1731 1732 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 1733 return -EINVAL; 1734 1735 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 1736 return -EINVAL; 1737 1738 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 1739 if (ret < 0) 1740 return ret; 1741 1742 pipe_config->limited_color_range = 1743 intel_dp_limited_color_range(pipe_config, conn_state); 1744 1745 if (pipe_config->dsc.compression_enable) 1746 output_bpp = pipe_config->dsc.compressed_bpp; 1747 else 1748 output_bpp = intel_dp_output_bpp(pipe_config->output_format, 1749 pipe_config->pipe_bpp); 1750 1751 if (intel_dp->mso_link_count) { 1752 int n = intel_dp->mso_link_count; 1753 int overlap = intel_dp->mso_pixel_overlap; 1754 1755 pipe_config->splitter.enable = true; 1756 pipe_config->splitter.link_count = n; 1757 pipe_config->splitter.pixel_overlap = overlap; 1758 1759 drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n", 1760 n, overlap); 1761 1762 adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap; 1763 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap; 1764 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap; 1765 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n 
+ overlap; 1766 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap; 1767 adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap; 1768 adjusted_mode->crtc_clock /= n; 1769 } 1770 1771 intel_link_compute_m_n(output_bpp, 1772 pipe_config->lane_count, 1773 adjusted_mode->crtc_clock, 1774 pipe_config->port_clock, 1775 &pipe_config->dp_m_n, 1776 constant_n, pipe_config->fec_enable); 1777 1778 /* FIXME: abstract this better */ 1779 if (pipe_config->splitter.enable) 1780 pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count; 1781 1782 if (!HAS_DDI(dev_priv)) 1783 g4x_dp_set_clock(encoder, pipe_config); 1784 1785 intel_vrr_compute_config(pipe_config, conn_state); 1786 intel_psr_compute_config(intel_dp, pipe_config); 1787 intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp, 1788 constant_n); 1789 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 1790 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 1791 1792 return 0; 1793 } 1794 1795 void intel_dp_set_link_params(struct intel_dp *intel_dp, 1796 int link_rate, int lane_count) 1797 { 1798 intel_dp->link_trained = false; 1799 intel_dp->link_rate = link_rate; 1800 intel_dp->lane_count = lane_count; 1801 } 1802 1803 /* Enable backlight PWM and backlight PP control. */ 1804 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 1805 const struct drm_connector_state *conn_state) 1806 { 1807 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 1808 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1809 1810 if (!intel_dp_is_edp(intel_dp)) 1811 return; 1812 1813 drm_dbg_kms(&i915->drm, "\n"); 1814 1815 intel_panel_enable_backlight(crtc_state, conn_state); 1816 intel_pps_backlight_on(intel_dp); 1817 } 1818 1819 /* Disable backlight PP control and backlight PWM. 
 */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* Reverse order of intel_edp_backlight_on(): PPS first, then PWM */
	intel_pps_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}

static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/*
 * Enable or disable DSC decompression in the sink via the DP_DSC_ENABLE
 * DPCD register; no-op when compression is not enabled in @crtc_state.
 */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enable ? "enable" : "disable");
}

/*
 * Write the Intel source OUI to the sink's DP_SOURCE_OUI registers.
 * With @careful, first read back the current value and skip the write
 * if it already matches, to avoid clearing sink state during init.
 */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = { 0 };

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 if downstream hpd depends on it */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns 1 (bytes written) on success */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		intel_dp_get_dpcd(intel_dp);

	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}

/*
 * Decide whether the BIOS-programmed state can be kept via fastset.
 * Returns false (forcing a full modeset) for unsupported link rates,
 * enabled DSC, or when PSR state must be computed.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	if (CAN_PSR(intel_dp)) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}

/*
 * Read and cache the PCON's DSC encoder capability DPCD registers.
 */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
			     intel_dp->pcon_dsc_dpcd,
			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}

/*
 * Return the highest FRL bandwidth (in Gbps) set in @frl_bw_mask,
 * or 0 if no bit is set.
 */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}
	return 0;
}

/*
 * Map an FRL bandwidth in Gbps to its DPCD bandwidth-mask bit;
 * 0 for an unknown value.
 */
static int intel_dp_pcon_set_frl_mask(int max_frl)
{
	switch (max_frl) {
	case 48:
		return DP_PCON_FRL_BW_MASK_48GBPS;
	case 40:
		return DP_PCON_FRL_BW_MASK_40GBPS;
	case 32:
		return DP_PCON_FRL_BW_MASK_32GBPS;
	case 24:
		return DP_PCON_FRL_BW_MASK_24GBPS;
	case 18:
		return DP_PCON_FRL_BW_MASK_18GBPS;
	case 9:
		return DP_PCON_FRL_BW_MASK_9GBPS;
	}

	return 0;
}

/*
 * Compute the HDMI sink's maximum FRL rate (Gbps) from its EDID-derived
 * display info, taking the DSC capability limit into account when present.
 */
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int max_frl_rate;
	int max_lanes, rate_per_lane;
	int max_dsc_lanes, dsc_rate_per_lane;

	max_lanes = connector->display_info.hdmi.max_lanes;
	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
	max_frl_rate = max_lanes * rate_per_lane;

	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
		if (max_dsc_lanes && dsc_rate_per_lane)
			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
	}

	return max_frl_rate;
}

/*
 * Train the PCON's HDMI FRL link: reset the FRL config, wait for FRL
 * ready, configure the max common bandwidth, enable FRL and wait for an
 * active HDMI link in FRL mode. Returns 0 on success, -ETIMEDOUT /
 * -EINVAL / AUX error codes on failure.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
	if (ret < 0)
		return ret;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Verify HDMI Link configuration shows FRL Mode */
	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
	    DP_PCON_HDMI_MODE_FRL) {
		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
		return -EINVAL;
	}
	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);

	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/*
 * An HDMI 2.1 sink behind a branch device is detected by a non-zero
 * FRL rate advertised in its EDID-derived display info.
 */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp->has_hdmi_sink &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

void
intel_dp_check_frl_training(struct intel_dp *intel_dp) 2155 { 2156 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2157 2158 /* 2159 * Always go for FRL training if: 2160 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7) 2161 * -sink is HDMI2.1 2162 */ 2163 if (!(intel_dp->dpcd[2] & DP_PCON_SOURCE_CTL_MODE) || 2164 !intel_dp_is_hdmi_2_1_sink(intel_dp) || 2165 intel_dp->frl.is_trained) 2166 return; 2167 2168 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { 2169 int ret, mode; 2170 2171 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); 2172 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2173 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); 2174 2175 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) 2176 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n"); 2177 } else { 2178 drm_dbg(&dev_priv->drm, "FRL training Completed\n"); 2179 } 2180 } 2181 2182 static int 2183 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) 2184 { 2185 int vactive = crtc_state->hw.adjusted_mode.vdisplay; 2186 2187 return intel_hdmi_dsc_get_slice_height(vactive); 2188 } 2189 2190 static int 2191 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, 2192 const struct intel_crtc_state *crtc_state) 2193 { 2194 struct intel_connector *intel_connector = intel_dp->attached_connector; 2195 struct drm_connector *connector = &intel_connector->base; 2196 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; 2197 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; 2198 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); 2199 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); 2200 2201 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, 2202 pcon_max_slice_width, 2203 hdmi_max_slices, hdmi_throughput); 2204 } 2205 2206 static int 2207 intel_dp_pcon_dsc_enc_bpp(struct intel_dp 
*intel_dp, 2208 const struct intel_crtc_state *crtc_state, 2209 int num_slices, int slice_width) 2210 { 2211 struct intel_connector *intel_connector = intel_dp->attached_connector; 2212 struct drm_connector *connector = &intel_connector->base; 2213 int output_format = crtc_state->output_format; 2214 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; 2215 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); 2216 int hdmi_max_chunk_bytes = 2217 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; 2218 2219 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, 2220 num_slices, output_format, hdmi_all_bpp, 2221 hdmi_max_chunk_bytes); 2222 } 2223 2224 void 2225 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, 2226 const struct intel_crtc_state *crtc_state) 2227 { 2228 u8 pps_param[6]; 2229 int slice_height; 2230 int slice_width; 2231 int num_slices; 2232 int bits_per_pixel; 2233 int ret; 2234 struct intel_connector *intel_connector = intel_dp->attached_connector; 2235 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2236 struct drm_connector *connector; 2237 bool hdmi_is_dsc_1_2; 2238 2239 if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) 2240 return; 2241 2242 if (!intel_connector) 2243 return; 2244 connector = &intel_connector->base; 2245 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; 2246 2247 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || 2248 !hdmi_is_dsc_1_2) 2249 return; 2250 2251 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); 2252 if (!slice_height) 2253 return; 2254 2255 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); 2256 if (!num_slices) 2257 return; 2258 2259 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, 2260 num_slices); 2261 2262 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, 2263 num_slices, slice_width); 2264 if (!bits_per_pixel) 2265 return; 2266 2267 pps_param[0] = slice_height & 0xFF; 2268 
pps_param[1] = slice_height >> 8; 2269 pps_param[2] = slice_width & 0xFF; 2270 pps_param[3] = slice_width >> 8; 2271 pps_param[4] = bits_per_pixel & 0xFF; 2272 pps_param[5] = (bits_per_pixel >> 8) & 0x3; 2273 2274 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); 2275 if (ret < 0) 2276 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); 2277 } 2278 2279 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 2280 const struct intel_crtc_state *crtc_state) 2281 { 2282 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2283 u8 tmp; 2284 2285 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) 2286 return; 2287 2288 if (!drm_dp_is_branch(intel_dp->dpcd)) 2289 return; 2290 2291 tmp = intel_dp->has_hdmi_sink ? 2292 DP_HDMI_DVI_OUTPUT_CONFIG : 0; 2293 2294 if (drm_dp_dpcd_writeb(&intel_dp->aux, 2295 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 2296 drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n", 2297 enableddisabled(intel_dp->has_hdmi_sink)); 2298 2299 tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 2300 intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 2301 2302 if (drm_dp_dpcd_writeb(&intel_dp->aux, 2303 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 2304 drm_dbg_kms(&i915->drm, 2305 "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n", 2306 enableddisabled(intel_dp->dfp.ycbcr_444_to_420)); 2307 2308 tmp = 0; 2309 if (intel_dp->dfp.rgb_to_ycbcr) { 2310 bool bt2020, bt709; 2311 2312 /* 2313 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only 2314 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default. 
2315 * 2316 */ 2317 tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE; 2318 2319 bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 2320 intel_dp->downstream_ports, 2321 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); 2322 bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 2323 intel_dp->downstream_ports, 2324 DP_DS_HDMI_BT709_RGB_YCBCR_CONV); 2325 switch (crtc_state->infoframes.vsc.colorimetry) { 2326 case DP_COLORIMETRY_BT2020_RGB: 2327 case DP_COLORIMETRY_BT2020_YCC: 2328 if (bt2020) 2329 tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE; 2330 break; 2331 case DP_COLORIMETRY_BT709_YCC: 2332 case DP_COLORIMETRY_XVYCC_709: 2333 if (bt709) 2334 tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE; 2335 break; 2336 default: 2337 break; 2338 } 2339 } 2340 2341 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 2342 drm_dbg_kms(&i915->drm, 2343 "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n", 2344 enableddisabled(tmp ? true : false)); 2345 } 2346 2347 2348 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 2349 { 2350 u8 dprx = 0; 2351 2352 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 2353 &dprx) != 1) 2354 return false; 2355 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 2356 } 2357 2358 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 2359 { 2360 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2361 2362 /* 2363 * Clear the cached register set to avoid using stale values 2364 * for the sinks that do not support DSC. 
2365 */ 2366 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 2367 2368 /* Clear fec_capable to avoid using stale values */ 2369 intel_dp->fec_capable = 0; 2370 2371 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 2372 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 2373 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 2374 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 2375 intel_dp->dsc_dpcd, 2376 sizeof(intel_dp->dsc_dpcd)) < 0) 2377 drm_err(&i915->drm, 2378 "Failed to read DPCD register 0x%x\n", 2379 DP_DSC_SUPPORT); 2380 2381 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 2382 (int)sizeof(intel_dp->dsc_dpcd), 2383 intel_dp->dsc_dpcd); 2384 2385 /* FEC is supported only on DP 1.4 */ 2386 if (!intel_dp_is_edp(intel_dp) && 2387 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 2388 &intel_dp->fec_capable) < 0) 2389 drm_err(&i915->drm, 2390 "Failed to read FEC DPCD register\n"); 2391 2392 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 2393 intel_dp->fec_capable); 2394 } 2395 } 2396 2397 static void intel_edp_mso_mode_fixup(struct intel_connector *connector, 2398 struct drm_display_mode *mode) 2399 { 2400 struct intel_dp *intel_dp = intel_attached_dp(connector); 2401 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2402 int n = intel_dp->mso_link_count; 2403 int overlap = intel_dp->mso_pixel_overlap; 2404 2405 if (!mode || !n) 2406 return; 2407 2408 mode->hdisplay = (mode->hdisplay - overlap) * n; 2409 mode->hsync_start = (mode->hsync_start - overlap) * n; 2410 mode->hsync_end = (mode->hsync_end - overlap) * n; 2411 mode->htotal = (mode->htotal - overlap) * n; 2412 mode->clock *= n; 2413 2414 drm_mode_set_name(mode); 2415 2416 drm_dbg_kms(&i915->drm, 2417 "[CONNECTOR:%d:%s] using generated MSO mode: ", 2418 connector->base.base.id, connector->base.name); 2419 drm_mode_debug_printmodeline(mode); 2420 } 2421 2422 static void intel_edp_mso_init(struct intel_dp *intel_dp) 2423 { 2424 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2425 u8 mso; 2426 
2427 if (intel_dp->edp_dpcd[0] < DP_EDP_14) 2428 return; 2429 2430 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { 2431 drm_err(&i915->drm, "Failed to read MSO cap\n"); 2432 return; 2433 } 2434 2435 /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ 2436 mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; 2437 if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { 2438 drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso); 2439 mso = 0; 2440 } 2441 2442 if (mso) { 2443 drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n", 2444 mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso); 2445 if (!HAS_MSO(i915)) { 2446 drm_err(&i915->drm, "No source MSO support, disabling\n"); 2447 mso = 0; 2448 } 2449 } 2450 2451 intel_dp->mso_link_count = mso; 2452 intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */ 2453 } 2454 2455 static bool 2456 intel_edp_init_dpcd(struct intel_dp *intel_dp) 2457 { 2458 struct drm_i915_private *dev_priv = 2459 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 2460 2461 /* this function is meant to be called only once */ 2462 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 2463 2464 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 2465 return false; 2466 2467 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 2468 drm_dp_is_branch(intel_dp->dpcd)); 2469 2470 /* 2471 * Read the eDP display control registers. 2472 * 2473 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 2474 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 2475 * set, but require eDP 1.4+ detection (e.g. for supported link rates 2476 * method). The display control registers should read zero if they're 2477 * not supported anyway. 
2478 */ 2479 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 2480 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 2481 sizeof(intel_dp->edp_dpcd)) 2482 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 2483 (int)sizeof(intel_dp->edp_dpcd), 2484 intel_dp->edp_dpcd); 2485 2486 /* 2487 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 2488 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 2489 */ 2490 intel_psr_init_dpcd(intel_dp); 2491 2492 /* Read the eDP 1.4+ supported link rates. */ 2493 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 2494 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 2495 int i; 2496 2497 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 2498 sink_rates, sizeof(sink_rates)); 2499 2500 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 2501 int val = le16_to_cpu(sink_rates[i]); 2502 2503 if (val == 0) 2504 break; 2505 2506 /* Value read multiplied by 200kHz gives the per-lane 2507 * link rate in kHz. The source rates are, however, 2508 * stored in terms of LS_Clk kHz. The full conversion 2509 * back to symbols is 2510 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 2511 */ 2512 intel_dp->sink_rates[i] = (val * 200) / 10; 2513 } 2514 intel_dp->num_sink_rates = i; 2515 } 2516 2517 /* 2518 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 2519 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
2520 */ 2521 if (intel_dp->num_sink_rates) 2522 intel_dp->use_rate_select = true; 2523 else 2524 intel_dp_set_sink_rates(intel_dp); 2525 2526 intel_dp_set_common_rates(intel_dp); 2527 2528 /* Read the eDP DSC DPCD registers */ 2529 if (DISPLAY_VER(dev_priv) >= 10) 2530 intel_dp_get_dsc_sink_cap(intel_dp); 2531 2532 /* 2533 * If needed, program our source OUI so we can make various Intel-specific AUX services 2534 * available (such as HDR backlight controls) 2535 */ 2536 intel_edp_init_source_oui(intel_dp, true); 2537 2538 intel_edp_mso_init(intel_dp); 2539 2540 return true; 2541 } 2542 2543 static bool 2544 intel_dp_has_sink_count(struct intel_dp *intel_dp) 2545 { 2546 if (!intel_dp->attached_connector) 2547 return false; 2548 2549 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 2550 intel_dp->dpcd, 2551 &intel_dp->desc); 2552 } 2553 2554 static bool 2555 intel_dp_get_dpcd(struct intel_dp *intel_dp) 2556 { 2557 int ret; 2558 2559 if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) 2560 return false; 2561 2562 /* 2563 * Don't clobber cached eDP rates. Also skip re-reading 2564 * the OUI/ID since we know it won't change. 2565 */ 2566 if (!intel_dp_is_edp(intel_dp)) { 2567 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 2568 drm_dp_is_branch(intel_dp->dpcd)); 2569 2570 intel_dp_set_sink_rates(intel_dp); 2571 intel_dp_set_common_rates(intel_dp); 2572 } 2573 2574 if (intel_dp_has_sink_count(intel_dp)) { 2575 ret = drm_dp_read_sink_count(&intel_dp->aux); 2576 if (ret < 0) 2577 return false; 2578 2579 /* 2580 * Sink count can change between short pulse hpd hence 2581 * a member variable in intel_dp will track any changes 2582 * between short pulse interrupts. 2583 */ 2584 intel_dp->sink_count = ret; 2585 2586 /* 2587 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 2588 * a dongle is present but no display. Unless we require to know 2589 * if a dongle is present or not, we don't need to update 2590 * downstream port information. 
So, an early return here saves 2591 * time from performing other operations which are not required. 2592 */ 2593 if (!intel_dp->sink_count) 2594 return false; 2595 } 2596 2597 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 2598 intel_dp->downstream_ports) == 0; 2599 } 2600 2601 static bool 2602 intel_dp_can_mst(struct intel_dp *intel_dp) 2603 { 2604 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2605 2606 return i915->params.enable_dp_mst && 2607 intel_dp->can_mst && 2608 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 2609 } 2610 2611 static void 2612 intel_dp_configure_mst(struct intel_dp *intel_dp) 2613 { 2614 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2615 struct intel_encoder *encoder = 2616 &dp_to_dig_port(intel_dp)->base; 2617 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 2618 2619 drm_dbg_kms(&i915->drm, 2620 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 2621 encoder->base.base.id, encoder->base.name, 2622 yesno(intel_dp->can_mst), yesno(sink_can_mst), 2623 yesno(i915->params.enable_dp_mst)); 2624 2625 if (!intel_dp->can_mst) 2626 return; 2627 2628 intel_dp->is_mst = sink_can_mst && 2629 i915->params.enable_dp_mst; 2630 2631 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 2632 intel_dp->is_mst); 2633 } 2634 2635 static bool 2636 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 2637 { 2638 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 2639 sink_irq_vector, DP_DPRX_ESI_LEN) == 2640 DP_DPRX_ESI_LEN; 2641 } 2642 2643 bool 2644 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 2645 const struct drm_connector_state *conn_state) 2646 { 2647 /* 2648 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 2649 * of Color Encoding Format and Content Color Gamut], in order to 2650 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 
2651 */ 2652 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2653 return true; 2654 2655 switch (conn_state->colorspace) { 2656 case DRM_MODE_COLORIMETRY_SYCC_601: 2657 case DRM_MODE_COLORIMETRY_OPYCC_601: 2658 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2659 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2660 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2661 return true; 2662 default: 2663 break; 2664 } 2665 2666 return false; 2667 } 2668 2669 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 2670 struct dp_sdp *sdp, size_t size) 2671 { 2672 size_t length = sizeof(struct dp_sdp); 2673 2674 if (size < length) 2675 return -ENOSPC; 2676 2677 memset(sdp, 0, size); 2678 2679 /* 2680 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 2681 * VSC SDP Header Bytes 2682 */ 2683 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 2684 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 2685 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 2686 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 2687 2688 /* 2689 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 2690 * per DP 1.4a spec. 
2691 */ 2692 if (vsc->revision != 0x5) 2693 goto out; 2694 2695 /* VSC SDP Payload for DB16 through DB18 */ 2696 /* Pixel Encoding and Colorimetry Formats */ 2697 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 2698 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 2699 2700 switch (vsc->bpc) { 2701 case 6: 2702 /* 6bpc: 0x0 */ 2703 break; 2704 case 8: 2705 sdp->db[17] = 0x1; /* DB17[3:0] */ 2706 break; 2707 case 10: 2708 sdp->db[17] = 0x2; 2709 break; 2710 case 12: 2711 sdp->db[17] = 0x3; 2712 break; 2713 case 16: 2714 sdp->db[17] = 0x4; 2715 break; 2716 default: 2717 MISSING_CASE(vsc->bpc); 2718 break; 2719 } 2720 /* Dynamic Range and Component Bit Depth */ 2721 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 2722 sdp->db[17] |= 0x80; /* DB17[7] */ 2723 2724 /* Content Type */ 2725 sdp->db[18] = vsc->content_type & 0x7; 2726 2727 out: 2728 return length; 2729 } 2730 2731 static ssize_t 2732 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 2733 struct dp_sdp *sdp, 2734 size_t size) 2735 { 2736 size_t length = sizeof(struct dp_sdp); 2737 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 2738 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 2739 ssize_t len; 2740 2741 if (size < length) 2742 return -ENOSPC; 2743 2744 memset(sdp, 0, size); 2745 2746 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 2747 if (len < 0) { 2748 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 2749 return -ENOSPC; 2750 } 2751 2752 if (len != infoframe_size) { 2753 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 2754 return -ENOSPC; 2755 } 2756 2757 /* 2758 * Set up the infoframe sdp packet for HDR static metadata. 
2759 * Prepare VSC Header for SU as per DP 1.4a spec, 2760 * Table 2-100 and Table 2-101 2761 */ 2762 2763 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 2764 sdp->sdp_header.HB0 = 0; 2765 /* 2766 * Packet Type 80h + Non-audio INFOFRAME Type value 2767 * HDMI_INFOFRAME_TYPE_DRM: 0x87 2768 * - 80h + Non-audio INFOFRAME Type value 2769 * - InfoFrame Type: 0x07 2770 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 2771 */ 2772 sdp->sdp_header.HB1 = drm_infoframe->type; 2773 /* 2774 * Least Significant Eight Bits of (Data Byte Count – 1) 2775 * infoframe_size - 1 2776 */ 2777 sdp->sdp_header.HB2 = 0x1D; 2778 /* INFOFRAME SDP Version Number */ 2779 sdp->sdp_header.HB3 = (0x13 << 2); 2780 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 2781 sdp->db[0] = drm_infoframe->version; 2782 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 2783 sdp->db[1] = drm_infoframe->length; 2784 /* 2785 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 2786 * HDMI_INFOFRAME_HEADER_SIZE 2787 */ 2788 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 2789 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 2790 HDMI_DRM_INFOFRAME_SIZE); 2791 2792 /* 2793 * Size of DP infoframe sdp packet for HDR static metadata consists of 2794 * - DP SDP Header(struct dp_sdp_header): 4 bytes 2795 * - Two Data Blocks: 2 bytes 2796 * CTA Header Byte2 (INFOFRAME Version Number) 2797 * CTA Header Byte3 (Length of INFOFRAME) 2798 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 2799 * 2800 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 2801 * infoframe size. But GEN11+ has larger than that size, write_infoframe 2802 * will pad rest of the size. 
2803 */ 2804 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 2805 } 2806 2807 static void intel_write_dp_sdp(struct intel_encoder *encoder, 2808 const struct intel_crtc_state *crtc_state, 2809 unsigned int type) 2810 { 2811 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2812 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2813 struct dp_sdp sdp = {}; 2814 ssize_t len; 2815 2816 if ((crtc_state->infoframes.enable & 2817 intel_hdmi_infoframe_enable(type)) == 0) 2818 return; 2819 2820 switch (type) { 2821 case DP_SDP_VSC: 2822 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 2823 sizeof(sdp)); 2824 break; 2825 case HDMI_PACKET_TYPE_GAMUT_METADATA: 2826 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 2827 &sdp, sizeof(sdp)); 2828 break; 2829 default: 2830 MISSING_CASE(type); 2831 return; 2832 } 2833 2834 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2835 return; 2836 2837 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 2838 } 2839 2840 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 2841 const struct intel_crtc_state *crtc_state, 2842 struct drm_dp_vsc_sdp *vsc) 2843 { 2844 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2845 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2846 struct dp_sdp sdp = {}; 2847 ssize_t len; 2848 2849 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 2850 2851 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2852 return; 2853 2854 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 2855 &sdp, len); 2856 } 2857 2858 void intel_dp_set_infoframes(struct intel_encoder *encoder, 2859 bool enable, 2860 const struct intel_crtc_state *crtc_state, 2861 const struct drm_connector_state *conn_state) 2862 { 2863 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2864 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2865 i915_reg_t reg = 
HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 2866 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 2867 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 2868 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 2869 u32 val = intel_de_read(dev_priv, reg); 2870 2871 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 2872 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 2873 if (intel_psr_enabled(intel_dp)) 2874 val &= ~dip_enable; 2875 else 2876 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 2877 2878 if (!enable) { 2879 intel_de_write(dev_priv, reg, val); 2880 intel_de_posting_read(dev_priv, reg); 2881 return; 2882 } 2883 2884 intel_de_write(dev_priv, reg, val); 2885 intel_de_posting_read(dev_priv, reg); 2886 2887 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 2888 if (!intel_psr_enabled(intel_dp)) 2889 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 2890 2891 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 2892 } 2893 2894 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 2895 const void *buffer, size_t size) 2896 { 2897 const struct dp_sdp *sdp = buffer; 2898 2899 if (size < sizeof(struct dp_sdp)) 2900 return -EINVAL; 2901 2902 memset(vsc, 0, size); 2903 2904 if (sdp->sdp_header.HB0 != 0) 2905 return -EINVAL; 2906 2907 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 2908 return -EINVAL; 2909 2910 vsc->sdp_type = sdp->sdp_header.HB1; 2911 vsc->revision = sdp->sdp_header.HB2; 2912 vsc->length = sdp->sdp_header.HB3; 2913 2914 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 2915 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 2916 /* 2917 * - HB2 = 0x2, HB3 = 0x8 2918 * VSC SDP supporting 3D stereo + PSR 2919 * - HB2 = 0x4, HB3 = 0xe 2920 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 2921 * first scan line of the SU region (applies to eDP v1.4b 2922 * and higher). 
2923 */ 2924 return 0; 2925 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 2926 /* 2927 * - HB2 = 0x5, HB3 = 0x13 2928 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 2929 * Format. 2930 */ 2931 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 2932 vsc->colorimetry = sdp->db[16] & 0xf; 2933 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 2934 2935 switch (sdp->db[17] & 0x7) { 2936 case 0x0: 2937 vsc->bpc = 6; 2938 break; 2939 case 0x1: 2940 vsc->bpc = 8; 2941 break; 2942 case 0x2: 2943 vsc->bpc = 10; 2944 break; 2945 case 0x3: 2946 vsc->bpc = 12; 2947 break; 2948 case 0x4: 2949 vsc->bpc = 16; 2950 break; 2951 default: 2952 MISSING_CASE(sdp->db[17] & 0x7); 2953 return -EINVAL; 2954 } 2955 2956 vsc->content_type = sdp->db[18] & 0x7; 2957 } else { 2958 return -EINVAL; 2959 } 2960 2961 return 0; 2962 } 2963 2964 static int 2965 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 2966 const void *buffer, size_t size) 2967 { 2968 int ret; 2969 2970 const struct dp_sdp *sdp = buffer; 2971 2972 if (size < sizeof(struct dp_sdp)) 2973 return -EINVAL; 2974 2975 if (sdp->sdp_header.HB0 != 0) 2976 return -EINVAL; 2977 2978 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 2979 return -EINVAL; 2980 2981 /* 2982 * Least Significant Eight Bits of (Data Byte Count – 1) 2983 * 1Dh (i.e., Data Byte Count = 30 bytes). 2984 */ 2985 if (sdp->sdp_header.HB2 != 0x1D) 2986 return -EINVAL; 2987 2988 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
*/ 2989 if ((sdp->sdp_header.HB3 & 0x3) != 0) 2990 return -EINVAL; 2991 2992 /* INFOFRAME SDP Version Number */ 2993 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 2994 return -EINVAL; 2995 2996 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 2997 if (sdp->db[0] != 1) 2998 return -EINVAL; 2999 3000 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 3001 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 3002 return -EINVAL; 3003 3004 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 3005 HDMI_DRM_INFOFRAME_SIZE); 3006 3007 return ret; 3008 } 3009 3010 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 3011 struct intel_crtc_state *crtc_state, 3012 struct drm_dp_vsc_sdp *vsc) 3013 { 3014 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3015 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3016 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3017 unsigned int type = DP_SDP_VSC; 3018 struct dp_sdp sdp = {}; 3019 int ret; 3020 3021 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 3022 if (intel_psr_enabled(intel_dp)) 3023 return; 3024 3025 if ((crtc_state->infoframes.enable & 3026 intel_hdmi_infoframe_enable(type)) == 0) 3027 return; 3028 3029 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 3030 3031 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 3032 3033 if (ret) 3034 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 3035 } 3036 3037 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 3038 struct intel_crtc_state *crtc_state, 3039 struct hdmi_drm_infoframe *drm_infoframe) 3040 { 3041 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3042 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3043 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 3044 struct dp_sdp sdp = {}; 3045 int ret; 3046 3047 if ((crtc_state->infoframes.enable & 3048 intel_hdmi_infoframe_enable(type)) 
	    == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

/*
 * Read back a DP SDP of @type from the hardware into @crtc_state's
 * infoframe state. Only DDI encoders carry SDPs; unknown types trip
 * MISSING_CASE().
 */
void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

/*
 * Handle a LINK_TRAINING automated compliance test request: read the
 * requested lane count and link rate from the sink's DPCD, validate them
 * against our capabilities, and stash them in intel_dp->compliance for
 * the subsequent modeset. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

/*
 * Handle a TEST_PATTERN (video pattern) compliance request: read the
 * requested pattern, geometry and color format from the sink's DPCD and
 * record them in intel_dp->compliance. Only the RGB color-ramp pattern
 * with VESA range and 6/8 bpc is accepted. Returns DP_TEST_ACK/NAK.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt
things */ 3179 intel_dp->compliance.test_active = true; 3180 3181 return DP_TEST_ACK; 3182 } 3183 3184 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 3185 { 3186 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3187 u8 test_result = DP_TEST_ACK; 3188 struct intel_connector *intel_connector = intel_dp->attached_connector; 3189 struct drm_connector *connector = &intel_connector->base; 3190 3191 if (intel_connector->detect_edid == NULL || 3192 connector->edid_corrupt || 3193 intel_dp->aux.i2c_defer_count > 6) { 3194 /* Check EDID read for NACKs, DEFERs and corruption 3195 * (DP CTS 1.2 Core r1.1) 3196 * 4.2.2.4 : Failed EDID read, I2C_NAK 3197 * 4.2.2.5 : Failed EDID read, I2C_DEFER 3198 * 4.2.2.6 : EDID corruption detected 3199 * Use failsafe mode for all cases 3200 */ 3201 if (intel_dp->aux.i2c_nack_count > 0 || 3202 intel_dp->aux.i2c_defer_count > 0) 3203 drm_dbg_kms(&i915->drm, 3204 "EDID read had %d NACKs, %d DEFERs\n", 3205 intel_dp->aux.i2c_nack_count, 3206 intel_dp->aux.i2c_defer_count); 3207 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 3208 } else { 3209 struct edid *block = intel_connector->detect_edid; 3210 3211 /* We have to write the checksum 3212 * of the last block read 3213 */ 3214 block += intel_connector->detect_edid->extensions; 3215 3216 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 3217 block->checksum) <= 0) 3218 drm_dbg_kms(&i915->drm, 3219 "Failed to write EDID checksum\n"); 3220 3221 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 3222 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 3223 } 3224 3225 /* Set test active flag here so userspace doesn't interrupt things */ 3226 intel_dp->compliance.test_active = true; 3227 3228 return test_result; 3229 } 3230 3231 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 3232 const struct intel_crtc_state *crtc_state) 3233 { 3234 struct drm_i915_private *dev_priv = 3235 
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}

/*
 * Tear down the transcoder/pipe/DP-TP enables around a PHY compliance
 * test. Write order matters: pipe is disabled before the DDI function
 * and the DP transport control.
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}

/*
 * Re-enable the pipe/DP-TP/DDI function after the PHY test pattern has
 * been programmed. Counterpart of intel_dp_autotest_phy_ddi_disable();
 * note the enable order is the reverse of the disable order.
 */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe
	     pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}

/*
 * Execute a pending PHY compliance test request: read the sink's adjust
 * requests, bounce the DDI around reprogramming signal levels and the
 * source pattern generator, then tell the sink which pattern to expect
 * via drm_dp_set_phy_test_pattern().
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		DRM_DEBUG_KMS("failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}

/*
 * Handle a PHY_TEST_PATTERN compliance request: cache the requested
 * pattern parameters from DPCD and mark the test active; the actual
 * pattern programming happens later from the hotplug path.
 */
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

/*
 * Dispatch an automated test request (DP_TEST_REQUEST) from the sink to
 * the matching autotest handler and write the ACK/NAK response back to
 * DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}

/*
 * Forward MST ESI bytes to the MST manager and additionally handle the
 * HDCP Content Protection IRQ, which the MST core does not consume.
 */
static void
intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
{
	drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);

	if (esi[1] & DP_CP_IRQ) {
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
		*handled = true;
	}
3453 } 3454 3455 /** 3456 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 3457 * @intel_dp: Intel DP struct 3458 * 3459 * Read any pending MST interrupts, call MST core to handle these and ack the 3460 * interrupts. Check if the main and AUX link state is ok. 3461 * 3462 * Returns: 3463 * - %true if pending interrupts were serviced (or no interrupts were 3464 * pending) w/o detecting an error condition. 3465 * - %false if an error condition - like AUX failure or a loss of link - is 3466 * detected, which needs servicing from the hotplug work. 3467 */ 3468 static bool 3469 intel_dp_check_mst_status(struct intel_dp *intel_dp) 3470 { 3471 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3472 bool link_ok = true; 3473 3474 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 3475 3476 for (;;) { 3477 u8 esi[DP_DPRX_ESI_LEN] = {}; 3478 bool handled; 3479 int retry; 3480 3481 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 3482 drm_dbg_kms(&i915->drm, 3483 "failed to get ESI - device may have failed\n"); 3484 link_ok = false; 3485 3486 break; 3487 } 3488 3489 /* check link status - esi[10] = 0x200c */ 3490 if (intel_dp->active_mst_links > 0 && link_ok && 3491 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 3492 drm_dbg_kms(&i915->drm, 3493 "channel EQ not ok, retraining\n"); 3494 link_ok = false; 3495 } 3496 3497 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 3498 3499 intel_dp_mst_hpd_irq(intel_dp, esi, &handled); 3500 3501 if (!handled) 3502 break; 3503 3504 for (retry = 0; retry < 3; retry++) { 3505 int wret; 3506 3507 wret = drm_dp_dpcd_write(&intel_dp->aux, 3508 DP_SINK_COUNT_ESI+1, 3509 &esi[1], 3); 3510 if (wret == 3) 3511 break; 3512 } 3513 } 3514 3515 return link_ok; 3516 } 3517 3518 static void 3519 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) 3520 { 3521 bool is_active; 3522 u8 buf = 0; 3523 3524 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); 3525 if 
	    (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

/*
 * Check whether the link has dropped out of CR/EQ lock and needs to be
 * retrained. Returns false when retraining is impossible or pointless
 * (never trained, PSR owns the link, AUX failure, stale link params).
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

/*
 * Check whether @conn_state's best_encoder belongs to this DP port,
 * either as the SST encoder or as one of its per-pipe MST fake encoders.
 */
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Collect (into *@crtc_mask) the active CRTCs driven by this DP port that
 * need retraining, taking the per-CRTC modeset locks as we go. Returns 0
 * or a locking error (-EDEADLK triggers backoff in the caller).
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if
		    (!crtc_state->hw.active)
			continue;

		/* Skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Re-check: the link may have recovered while we iterated */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}

/*
 * An MST port is always considered connected; an SST port only when the
 * attached connector last probed as connected.
 */
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
	       intel_dp->is_mst;
}

/*
 * Retrain the link for @encoder under @ctx. Suppresses FIFO underrun
 * reporting around the retrain, trains once on the (MST master)
 * transcoder, then waits a vblank before re-enabling underrun reporting.
 * Returns 0 or a modeset-locking error.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * Like intel_dp_prep_link_retrain() but for the PHY compliance test path:
 * collect the active CRTCs on this port into *@crtc_mask, taking the
 * per-CRTC locks. No link-status precondition here - the test always runs.
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Run the pending PHY compliance test on @encoder's (MST master)
 * transcoder, under @ctx. Returns 0 or a modeset-locking error.
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}

/*
 * Entry point for the deferred PHY compliance test: retries
 * intel_dp_do_phy_test() with modeset-lock backoff until it completes.
 */
void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}

/*
 * Read, ack and dispatch the sink's DEVICE_SERVICE_IRQ_VECTOR (automated
 * test requests, HDCP CP IRQ, sink-specific IRQs). DPCD 1.1+ only.
 */
static void
intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the IRQs by writing the vector back */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * Read, ack and dispatch the sink's LINK_SERVICE_IRQ_VECTOR_ESI0
 * (currently only the PCON HDMI link status change). DPCD 1.1+ only.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
		drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
		return;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
		drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
		return;
	}

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP panels never use this path */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are always connected */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return a copy of the connector's cached EDID if one exists (NULL if the
 * cached EDID is an error marker), otherwise read it fresh over DDC.
 * Caller owns the returned EDID.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

/*
 * Refresh the cached downstream-facing-port (DFP) capabilities from the
 * branch device's DPCD and @edid: max bpc, max dotclock, TMDS clock
 * limits and PCON FRL bandwidth; then re-read the PCON DSC caps.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
4083 intel_dp->downstream_ports); 4084 4085 intel_dp->dfp.min_tmds_clock = 4086 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, 4087 intel_dp->downstream_ports, 4088 edid); 4089 intel_dp->dfp.max_tmds_clock = 4090 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, 4091 intel_dp->downstream_ports, 4092 edid); 4093 4094 intel_dp->dfp.pcon_max_frl_bw = 4095 drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd, 4096 intel_dp->downstream_ports); 4097 4098 drm_dbg_kms(&i915->drm, 4099 "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n", 4100 connector->base.base.id, connector->base.name, 4101 intel_dp->dfp.max_bpc, 4102 intel_dp->dfp.max_dotclock, 4103 intel_dp->dfp.min_tmds_clock, 4104 intel_dp->dfp.max_tmds_clock, 4105 intel_dp->dfp.pcon_max_frl_bw); 4106 4107 intel_dp_get_pcon_dsc_cap(intel_dp); 4108 } 4109 4110 static void 4111 intel_dp_update_420(struct intel_dp *intel_dp) 4112 { 4113 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4114 struct intel_connector *connector = intel_dp->attached_connector; 4115 bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr; 4116 4117 /* No YCbCr output support on gmch platforms */ 4118 if (HAS_GMCH(i915)) 4119 return; 4120 4121 /* 4122 * ILK doesn't seem capable of DP YCbCr output. The 4123 * displayed image is severly corrupted. SNB+ is fine. 
4124 */ 4125 if (IS_IRONLAKE(i915)) 4126 return; 4127 4128 is_branch = drm_dp_is_branch(intel_dp->dpcd); 4129 ycbcr_420_passthrough = 4130 drm_dp_downstream_420_passthrough(intel_dp->dpcd, 4131 intel_dp->downstream_ports); 4132 /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ 4133 ycbcr_444_to_420 = 4134 dp_to_dig_port(intel_dp)->lspcon.active || 4135 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, 4136 intel_dp->downstream_ports); 4137 rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 4138 intel_dp->downstream_ports, 4139 DP_DS_HDMI_BT601_RGB_YCBCR_CONV | 4140 DP_DS_HDMI_BT709_RGB_YCBCR_CONV | 4141 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); 4142 4143 if (DISPLAY_VER(i915) >= 11) { 4144 /* Let PCON convert from RGB->YCbCr if possible */ 4145 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) { 4146 intel_dp->dfp.rgb_to_ycbcr = true; 4147 intel_dp->dfp.ycbcr_444_to_420 = true; 4148 connector->base.ycbcr_420_allowed = true; 4149 } else { 4150 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ 4151 intel_dp->dfp.ycbcr_444_to_420 = 4152 ycbcr_444_to_420 && !ycbcr_420_passthrough; 4153 4154 connector->base.ycbcr_420_allowed = 4155 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; 4156 } 4157 } else { 4158 /* 4:4:4->4:2:0 conversion is the only way */ 4159 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; 4160 4161 connector->base.ycbcr_420_allowed = ycbcr_444_to_420; 4162 } 4163 4164 drm_dbg_kms(&i915->drm, 4165 "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? 
%s\n", 4166 connector->base.base.id, connector->base.name, 4167 yesno(intel_dp->dfp.rgb_to_ycbcr), 4168 yesno(connector->base.ycbcr_420_allowed), 4169 yesno(intel_dp->dfp.ycbcr_444_to_420)); 4170 } 4171 4172 static void 4173 intel_dp_set_edid(struct intel_dp *intel_dp) 4174 { 4175 struct intel_connector *connector = intel_dp->attached_connector; 4176 struct edid *edid; 4177 4178 intel_dp_unset_edid(intel_dp); 4179 edid = intel_dp_get_edid(intel_dp); 4180 connector->detect_edid = edid; 4181 4182 intel_dp_update_dfp(intel_dp, edid); 4183 intel_dp_update_420(intel_dp); 4184 4185 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 4186 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 4187 intel_dp->has_audio = drm_detect_monitor_audio(edid); 4188 } 4189 4190 drm_dp_cec_set_edid(&intel_dp->aux, edid); 4191 } 4192 4193 static void 4194 intel_dp_unset_edid(struct intel_dp *intel_dp) 4195 { 4196 struct intel_connector *connector = intel_dp->attached_connector; 4197 4198 drm_dp_cec_unset_edid(&intel_dp->aux); 4199 kfree(connector->detect_edid); 4200 connector->detect_edid = NULL; 4201 4202 intel_dp->has_hdmi_sink = false; 4203 intel_dp->has_audio = false; 4204 4205 intel_dp->dfp.max_bpc = 0; 4206 intel_dp->dfp.max_dotclock = 0; 4207 intel_dp->dfp.min_tmds_clock = 0; 4208 intel_dp->dfp.max_tmds_clock = 0; 4209 4210 intel_dp->dfp.pcon_max_frl_bw = 0; 4211 4212 intel_dp->dfp.ycbcr_444_to_420 = false; 4213 connector->base.ycbcr_420_allowed = false; 4214 } 4215 4216 static int 4217 intel_dp_detect(struct drm_connector *connector, 4218 struct drm_modeset_acquire_ctx *ctx, 4219 bool force) 4220 { 4221 struct drm_i915_private *dev_priv = to_i915(connector->dev); 4222 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4223 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4224 struct intel_encoder *encoder = &dig_port->base; 4225 enum drm_connector_status status; 4226 4227 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 4228 
connector->base.id, connector->name); 4229 drm_WARN_ON(&dev_priv->drm, 4230 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 4231 4232 if (!INTEL_DISPLAY_ENABLED(dev_priv)) 4233 return connector_status_disconnected; 4234 4235 /* Can't disconnect eDP */ 4236 if (intel_dp_is_edp(intel_dp)) 4237 status = edp_detect(intel_dp); 4238 else if (intel_digital_port_connected(encoder)) 4239 status = intel_dp_detect_dpcd(intel_dp); 4240 else 4241 status = connector_status_disconnected; 4242 4243 if (status == connector_status_disconnected) { 4244 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 4245 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4246 4247 if (intel_dp->is_mst) { 4248 drm_dbg_kms(&dev_priv->drm, 4249 "MST device may have disappeared %d vs %d\n", 4250 intel_dp->is_mst, 4251 intel_dp->mst_mgr.mst_state); 4252 intel_dp->is_mst = false; 4253 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4254 intel_dp->is_mst); 4255 } 4256 4257 goto out; 4258 } 4259 4260 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 4261 if (DISPLAY_VER(dev_priv) >= 11) 4262 intel_dp_get_dsc_sink_cap(intel_dp); 4263 4264 intel_dp_configure_mst(intel_dp); 4265 4266 /* 4267 * TODO: Reset link params when switching to MST mode, until MST 4268 * supports link training fallback params. 
4269 */ 4270 if (intel_dp->reset_link_params || intel_dp->is_mst) { 4271 /* Initial max link lane count */ 4272 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 4273 4274 /* Initial max link rate */ 4275 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 4276 4277 intel_dp->reset_link_params = false; 4278 } 4279 4280 intel_dp_print_rates(intel_dp); 4281 4282 if (intel_dp->is_mst) { 4283 /* 4284 * If we are in MST mode then this connector 4285 * won't appear connected or have anything 4286 * with EDID on it 4287 */ 4288 status = connector_status_disconnected; 4289 goto out; 4290 } 4291 4292 /* 4293 * Some external monitors do not signal loss of link synchronization 4294 * with an IRQ_HPD, so force a link status check. 4295 */ 4296 if (!intel_dp_is_edp(intel_dp)) { 4297 int ret; 4298 4299 ret = intel_dp_retrain_link(encoder, ctx); 4300 if (ret) 4301 return ret; 4302 } 4303 4304 /* 4305 * Clearing NACK and defer counts to get their exact values 4306 * while reading EDID which are required by Compliance tests 4307 * 4.2.2.4 and 4.2.2.5 4308 */ 4309 intel_dp->aux.i2c_nack_count = 0; 4310 intel_dp->aux.i2c_defer_count = 0; 4311 4312 intel_dp_set_edid(intel_dp); 4313 if (intel_dp_is_edp(intel_dp) || 4314 to_intel_connector(connector)->detect_edid) 4315 status = connector_status_connected; 4316 4317 intel_dp_check_device_service_irq(intel_dp); 4318 4319 out: 4320 if (status != connector_status_connected && !intel_dp->is_mst) 4321 intel_dp_unset_edid(intel_dp); 4322 4323 /* 4324 * Make sure the refs for power wells enabled during detect are 4325 * dropped to avoid a new detect cycle triggered by HPD polling. 
4326 */ 4327 intel_display_power_flush_work(dev_priv); 4328 4329 if (!intel_dp_is_edp(intel_dp)) 4330 drm_dp_set_subconnector_property(connector, 4331 status, 4332 intel_dp->dpcd, 4333 intel_dp->downstream_ports); 4334 return status; 4335 } 4336 4337 static void 4338 intel_dp_force(struct drm_connector *connector) 4339 { 4340 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4341 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4342 struct intel_encoder *intel_encoder = &dig_port->base; 4343 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 4344 enum intel_display_power_domain aux_domain = 4345 intel_aux_power_domain(dig_port); 4346 intel_wakeref_t wakeref; 4347 4348 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 4349 connector->base.id, connector->name); 4350 intel_dp_unset_edid(intel_dp); 4351 4352 if (connector->status != connector_status_connected) 4353 return; 4354 4355 wakeref = intel_display_power_get(dev_priv, aux_domain); 4356 4357 intel_dp_set_edid(intel_dp); 4358 4359 intel_display_power_put(dev_priv, aux_domain, wakeref); 4360 } 4361 4362 static int intel_dp_get_modes(struct drm_connector *connector) 4363 { 4364 struct intel_connector *intel_connector = to_intel_connector(connector); 4365 struct edid *edid; 4366 int num_modes = 0; 4367 4368 edid = intel_connector->detect_edid; 4369 if (edid) { 4370 num_modes = intel_connector_update_modes(connector, edid); 4371 4372 if (intel_vrr_is_capable(connector)) 4373 drm_connector_set_vrr_capable_property(connector, 4374 true); 4375 } 4376 4377 /* Also add fixed mode, which may or may not be present in EDID */ 4378 if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && 4379 intel_connector->panel.fixed_mode) { 4380 struct drm_display_mode *mode; 4381 4382 mode = drm_mode_duplicate(connector->dev, 4383 intel_connector->panel.fixed_mode); 4384 if (mode) { 4385 drm_mode_probed_add(connector, mode); 4386 num_modes++; 4387 } 4388 } 4389 4390 if 
(num_modes) 4391 return num_modes; 4392 4393 if (!edid) { 4394 struct intel_dp *intel_dp = intel_attached_dp(intel_connector); 4395 struct drm_display_mode *mode; 4396 4397 mode = drm_dp_downstream_mode(connector->dev, 4398 intel_dp->dpcd, 4399 intel_dp->downstream_ports); 4400 if (mode) { 4401 drm_mode_probed_add(connector, mode); 4402 num_modes++; 4403 } 4404 } 4405 4406 return num_modes; 4407 } 4408 4409 static int 4410 intel_dp_connector_register(struct drm_connector *connector) 4411 { 4412 struct drm_i915_private *i915 = to_i915(connector->dev); 4413 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4414 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4415 struct intel_lspcon *lspcon = &dig_port->lspcon; 4416 int ret; 4417 4418 ret = intel_connector_register(connector); 4419 if (ret) 4420 return ret; 4421 4422 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 4423 intel_dp->aux.name, connector->kdev->kobj.name); 4424 4425 intel_dp->aux.dev = connector->kdev; 4426 ret = drm_dp_aux_register(&intel_dp->aux); 4427 if (!ret) 4428 drm_dp_cec_register_connector(&intel_dp->aux, connector); 4429 4430 if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) 4431 return ret; 4432 4433 /* 4434 * ToDo: Clean this up to handle lspcon init and resume more 4435 * efficiently and streamlined. 
4436 */ 4437 if (lspcon_init(dig_port)) { 4438 lspcon_detect_hdr_capability(lspcon); 4439 if (lspcon->hdr_supported) 4440 drm_object_attach_property(&connector->base, 4441 connector->dev->mode_config.hdr_output_metadata_property, 4442 0); 4443 } 4444 4445 return ret; 4446 } 4447 4448 static void 4449 intel_dp_connector_unregister(struct drm_connector *connector) 4450 { 4451 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4452 4453 drm_dp_cec_unregister_connector(&intel_dp->aux); 4454 drm_dp_aux_unregister(&intel_dp->aux); 4455 intel_connector_unregister(connector); 4456 } 4457 4458 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 4459 { 4460 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 4461 struct intel_dp *intel_dp = &dig_port->dp; 4462 4463 intel_dp_mst_encoder_cleanup(dig_port); 4464 4465 intel_pps_vdd_off_sync(intel_dp); 4466 4467 intel_dp_aux_fini(intel_dp); 4468 } 4469 4470 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 4471 { 4472 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 4473 4474 intel_pps_vdd_off_sync(intel_dp); 4475 } 4476 4477 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) 4478 { 4479 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 4480 4481 intel_pps_wait_power_cycle(intel_dp); 4482 } 4483 4484 static int intel_modeset_tile_group(struct intel_atomic_state *state, 4485 int tile_group_id) 4486 { 4487 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4488 struct drm_connector_list_iter conn_iter; 4489 struct drm_connector *connector; 4490 int ret = 0; 4491 4492 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 4493 drm_for_each_connector_iter(connector, &conn_iter) { 4494 struct drm_connector_state *conn_state; 4495 struct intel_crtc_state *crtc_state; 4496 struct intel_crtc *crtc; 4497 4498 if (!connector->has_tile || 4499 connector->tile_group->id != tile_group_id) 4500 continue; 4501 
4502 conn_state = drm_atomic_get_connector_state(&state->base, 4503 connector); 4504 if (IS_ERR(conn_state)) { 4505 ret = PTR_ERR(conn_state); 4506 break; 4507 } 4508 4509 crtc = to_intel_crtc(conn_state->crtc); 4510 4511 if (!crtc) 4512 continue; 4513 4514 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 4515 crtc_state->uapi.mode_changed = true; 4516 4517 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 4518 if (ret) 4519 break; 4520 } 4521 drm_connector_list_iter_end(&conn_iter); 4522 4523 return ret; 4524 } 4525 4526 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 4527 { 4528 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4529 struct intel_crtc *crtc; 4530 4531 if (transcoders == 0) 4532 return 0; 4533 4534 for_each_intel_crtc(&dev_priv->drm, crtc) { 4535 struct intel_crtc_state *crtc_state; 4536 int ret; 4537 4538 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 4539 if (IS_ERR(crtc_state)) 4540 return PTR_ERR(crtc_state); 4541 4542 if (!crtc_state->hw.enable) 4543 continue; 4544 4545 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 4546 continue; 4547 4548 crtc_state->uapi.mode_changed = true; 4549 4550 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 4551 if (ret) 4552 return ret; 4553 4554 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 4555 if (ret) 4556 return ret; 4557 4558 transcoders &= ~BIT(crtc_state->cpu_transcoder); 4559 } 4560 4561 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 4562 4563 return 0; 4564 } 4565 4566 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 4567 struct drm_connector *connector) 4568 { 4569 const struct drm_connector_state *old_conn_state = 4570 drm_atomic_get_old_connector_state(&state->base, connector); 4571 const struct intel_crtc_state *old_crtc_state; 4572 struct intel_crtc *crtc; 4573 u8 transcoders; 4574 4575 crtc = to_intel_crtc(old_conn_state->crtc); 
4576 if (!crtc) 4577 return 0; 4578 4579 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 4580 4581 if (!old_crtc_state->hw.active) 4582 return 0; 4583 4584 transcoders = old_crtc_state->sync_mode_slaves_mask; 4585 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 4586 transcoders |= BIT(old_crtc_state->master_transcoder); 4587 4588 return intel_modeset_affected_transcoders(state, 4589 transcoders); 4590 } 4591 4592 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 4593 struct drm_atomic_state *_state) 4594 { 4595 struct drm_i915_private *dev_priv = to_i915(conn->dev); 4596 struct intel_atomic_state *state = to_intel_atomic_state(_state); 4597 int ret; 4598 4599 ret = intel_digital_connector_atomic_check(conn, &state->base); 4600 if (ret) 4601 return ret; 4602 4603 /* 4604 * We don't enable port sync on BDW due to missing w/as and 4605 * due to not having adjusted the modeset sequence appropriately. 4606 */ 4607 if (DISPLAY_VER(dev_priv) < 9) 4608 return 0; 4609 4610 if (!intel_connector_needs_modeset(state, conn)) 4611 return 0; 4612 4613 if (conn->has_tile) { 4614 ret = intel_modeset_tile_group(state, conn->tile_group->id); 4615 if (ret) 4616 return ret; 4617 } 4618 4619 return intel_modeset_synced_crtcs(state, conn); 4620 } 4621 4622 static const struct drm_connector_funcs intel_dp_connector_funcs = { 4623 .force = intel_dp_force, 4624 .fill_modes = drm_helper_probe_single_connector_modes, 4625 .atomic_get_property = intel_digital_connector_atomic_get_property, 4626 .atomic_set_property = intel_digital_connector_atomic_set_property, 4627 .late_register = intel_dp_connector_register, 4628 .early_unregister = intel_dp_connector_unregister, 4629 .destroy = intel_connector_destroy, 4630 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 4631 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 4632 }; 4633 4634 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = 
{ 4635 .detect_ctx = intel_dp_detect, 4636 .get_modes = intel_dp_get_modes, 4637 .mode_valid = intel_dp_mode_valid, 4638 .atomic_check = intel_dp_connector_atomic_check, 4639 }; 4640 4641 enum irqreturn 4642 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 4643 { 4644 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 4645 struct intel_dp *intel_dp = &dig_port->dp; 4646 4647 if (dig_port->base.type == INTEL_OUTPUT_EDP && 4648 (long_hpd || !intel_pps_have_power(intel_dp))) { 4649 /* 4650 * vdd off can generate a long/short pulse on eDP which 4651 * would require vdd on to handle it, and thus we 4652 * would end up in an endless cycle of 4653 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 4654 */ 4655 drm_dbg_kms(&i915->drm, 4656 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 4657 long_hpd ? "long" : "short", 4658 dig_port->base.base.base.id, 4659 dig_port->base.base.name); 4660 return IRQ_HANDLED; 4661 } 4662 4663 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 4664 dig_port->base.base.base.id, 4665 dig_port->base.base.name, 4666 long_hpd ? "long" : "short"); 4667 4668 if (long_hpd) { 4669 intel_dp->reset_link_params = true; 4670 return IRQ_NONE; 4671 } 4672 4673 if (intel_dp->is_mst) { 4674 if (!intel_dp_check_mst_status(intel_dp)) 4675 return IRQ_NONE; 4676 } else if (!intel_dp_short_pulse(intel_dp)) { 4677 return IRQ_NONE; 4678 } 4679 4680 return IRQ_HANDLED; 4681 } 4682 4683 /* check the VBT to see whether the eDP is on another port */ 4684 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 4685 { 4686 /* 4687 * eDP not supported on g4x. so bail out early just 4688 * for a bit extra safety in case the VBT is bonkers. 
4689 */ 4690 if (DISPLAY_VER(dev_priv) < 5) 4691 return false; 4692 4693 if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) 4694 return true; 4695 4696 return intel_bios_is_port_edp(dev_priv, port); 4697 } 4698 4699 static void 4700 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 4701 { 4702 struct drm_i915_private *dev_priv = to_i915(connector->dev); 4703 enum port port = dp_to_dig_port(intel_dp)->base.port; 4704 4705 if (!intel_dp_is_edp(intel_dp)) 4706 drm_connector_attach_dp_subconnector_property(connector); 4707 4708 if (!IS_G4X(dev_priv) && port != PORT_A) 4709 intel_attach_force_audio_property(connector); 4710 4711 intel_attach_broadcast_rgb_property(connector); 4712 if (HAS_GMCH(dev_priv)) 4713 drm_connector_attach_max_bpc_property(connector, 6, 10); 4714 else if (DISPLAY_VER(dev_priv) >= 5) 4715 drm_connector_attach_max_bpc_property(connector, 6, 12); 4716 4717 /* Register HDMI colorspace for case of lspcon */ 4718 if (intel_bios_is_lspcon_present(dev_priv, port)) { 4719 drm_connector_attach_content_type_property(connector); 4720 intel_attach_hdmi_colorspace_property(connector); 4721 } else { 4722 intel_attach_dp_colorspace_property(connector); 4723 } 4724 4725 if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11) 4726 drm_object_attach_property(&connector->base, 4727 connector->dev->mode_config.hdr_output_metadata_property, 4728 0); 4729 4730 if (intel_dp_is_edp(intel_dp)) { 4731 u32 allowed_scalers; 4732 4733 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 4734 if (!HAS_GMCH(dev_priv)) 4735 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 4736 4737 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 4738 4739 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 4740 4741 } 4742 4743 if (HAS_VRR(dev_priv)) 4744 drm_connector_attach_vrr_capable_property(connector); 4745 } 4746 4747 /** 4748 * intel_dp_set_drrs_state - program registers for RR switch to take effect 
4749 * @dev_priv: i915 device 4750 * @crtc_state: a pointer to the active intel_crtc_state 4751 * @refresh_rate: RR to be programmed 4752 * 4753 * This function gets called when refresh rate (RR) has to be changed from 4754 * one frequency to another. Switches can be between high and low RR 4755 * supported by the panel or to any other RR based on media playback (in 4756 * this case, RR value needs to be passed from user space). 4757 * 4758 * The caller of this function needs to take a lock on dev_priv->drrs. 4759 */ 4760 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 4761 const struct intel_crtc_state *crtc_state, 4762 int refresh_rate) 4763 { 4764 struct intel_dp *intel_dp = dev_priv->drrs.dp; 4765 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 4766 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 4767 4768 if (refresh_rate <= 0) { 4769 drm_dbg_kms(&dev_priv->drm, 4770 "Refresh rate should be positive non-zero.\n"); 4771 return; 4772 } 4773 4774 if (intel_dp == NULL) { 4775 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 4776 return; 4777 } 4778 4779 if (!intel_crtc) { 4780 drm_dbg_kms(&dev_priv->drm, 4781 "DRRS: intel_crtc not initialized\n"); 4782 return; 4783 } 4784 4785 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 4786 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 4787 return; 4788 } 4789 4790 if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == 4791 refresh_rate) 4792 index = DRRS_LOW_RR; 4793 4794 if (index == dev_priv->drrs.refresh_rate_type) { 4795 drm_dbg_kms(&dev_priv->drm, 4796 "DRRS requested for previously set RR...ignoring\n"); 4797 return; 4798 } 4799 4800 if (!crtc_state->hw.active) { 4801 drm_dbg_kms(&dev_priv->drm, 4802 "eDP encoder disabled. 
CRTC not Active\n"); 4803 return; 4804 } 4805 4806 if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 4807 switch (index) { 4808 case DRRS_HIGH_RR: 4809 intel_dp_set_m_n(crtc_state, M1_N1); 4810 break; 4811 case DRRS_LOW_RR: 4812 intel_dp_set_m_n(crtc_state, M2_N2); 4813 break; 4814 case DRRS_MAX_RR: 4815 default: 4816 drm_err(&dev_priv->drm, 4817 "Unsupported refreshrate type\n"); 4818 } 4819 } else if (DISPLAY_VER(dev_priv) > 6) { 4820 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 4821 u32 val; 4822 4823 val = intel_de_read(dev_priv, reg); 4824 if (index > DRRS_HIGH_RR) { 4825 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4826 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 4827 else 4828 val |= PIPECONF_EDP_RR_MODE_SWITCH; 4829 } else { 4830 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4831 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 4832 else 4833 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 4834 } 4835 intel_de_write(dev_priv, reg, val); 4836 } 4837 4838 dev_priv->drrs.refresh_rate_type = index; 4839 4840 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", 4841 refresh_rate); 4842 } 4843 4844 static void 4845 intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) 4846 { 4847 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4848 4849 dev_priv->drrs.busy_frontbuffer_bits = 0; 4850 dev_priv->drrs.dp = intel_dp; 4851 } 4852 4853 /** 4854 * intel_edp_drrs_enable - init drrs struct if supported 4855 * @intel_dp: DP struct 4856 * @crtc_state: A pointer to the active crtc state. 
4857 * 4858 * Initializes frontbuffer_bits and drrs.dp 4859 */ 4860 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 4861 const struct intel_crtc_state *crtc_state) 4862 { 4863 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4864 4865 if (!crtc_state->has_drrs) 4866 return; 4867 4868 drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); 4869 4870 mutex_lock(&dev_priv->drrs.mutex); 4871 4872 if (dev_priv->drrs.dp) { 4873 drm_warn(&dev_priv->drm, "DRRS already enabled\n"); 4874 goto unlock; 4875 } 4876 4877 intel_edp_drrs_enable_locked(intel_dp); 4878 4879 unlock: 4880 mutex_unlock(&dev_priv->drrs.mutex); 4881 } 4882 4883 static void 4884 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, 4885 const struct intel_crtc_state *crtc_state) 4886 { 4887 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4888 4889 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { 4890 int refresh; 4891 4892 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); 4893 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); 4894 } 4895 4896 dev_priv->drrs.dp = NULL; 4897 } 4898 4899 /** 4900 * intel_edp_drrs_disable - Disable DRRS 4901 * @intel_dp: DP struct 4902 * @old_crtc_state: Pointer to old crtc_state. 
4903 * 4904 */ 4905 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 4906 const struct intel_crtc_state *old_crtc_state) 4907 { 4908 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4909 4910 if (!old_crtc_state->has_drrs) 4911 return; 4912 4913 mutex_lock(&dev_priv->drrs.mutex); 4914 if (!dev_priv->drrs.dp) { 4915 mutex_unlock(&dev_priv->drrs.mutex); 4916 return; 4917 } 4918 4919 intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); 4920 mutex_unlock(&dev_priv->drrs.mutex); 4921 4922 cancel_delayed_work_sync(&dev_priv->drrs.work); 4923 } 4924 4925 /** 4926 * intel_edp_drrs_update - Update DRRS state 4927 * @intel_dp: Intel DP 4928 * @crtc_state: new CRTC state 4929 * 4930 * This function will update DRRS states, disabling or enabling DRRS when 4931 * executing fastsets. For full modeset, intel_edp_drrs_disable() and 4932 * intel_edp_drrs_enable() should be called instead. 4933 */ 4934 void 4935 intel_edp_drrs_update(struct intel_dp *intel_dp, 4936 const struct intel_crtc_state *crtc_state) 4937 { 4938 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4939 4940 if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) 4941 return; 4942 4943 mutex_lock(&dev_priv->drrs.mutex); 4944 4945 /* New state matches current one? */ 4946 if (crtc_state->has_drrs == !!dev_priv->drrs.dp) 4947 goto unlock; 4948 4949 if (crtc_state->has_drrs) 4950 intel_edp_drrs_enable_locked(intel_dp); 4951 else 4952 intel_edp_drrs_disable_locked(intel_dp, crtc_state); 4953 4954 unlock: 4955 mutex_unlock(&dev_priv->drrs.mutex); 4956 } 4957 4958 static void intel_edp_drrs_downclock_work(struct work_struct *work) 4959 { 4960 struct drm_i915_private *dev_priv = 4961 container_of(work, typeof(*dev_priv), drrs.work.work); 4962 struct intel_dp *intel_dp; 4963 4964 mutex_lock(&dev_priv->drrs.mutex); 4965 4966 intel_dp = dev_priv->drrs.dp; 4967 4968 if (!intel_dp) 4969 goto unlock; 4970 4971 /* 4972 * The delayed work can race with an invalidate hence we need to 4973 * recheck. 
4974 */ 4975 4976 if (dev_priv->drrs.busy_frontbuffer_bits) 4977 goto unlock; 4978 4979 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 4980 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 4981 4982 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 4983 drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); 4984 } 4985 4986 unlock: 4987 mutex_unlock(&dev_priv->drrs.mutex); 4988 } 4989 4990 /** 4991 * intel_edp_drrs_invalidate - Disable Idleness DRRS 4992 * @dev_priv: i915 device 4993 * @frontbuffer_bits: frontbuffer plane tracking bits 4994 * 4995 * This function gets called everytime rendering on the given planes start. 4996 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). 4997 * 4998 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 4999 */ 5000 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 5001 unsigned int frontbuffer_bits) 5002 { 5003 struct intel_dp *intel_dp; 5004 struct drm_crtc *crtc; 5005 enum pipe pipe; 5006 5007 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 5008 return; 5009 5010 cancel_delayed_work(&dev_priv->drrs.work); 5011 5012 mutex_lock(&dev_priv->drrs.mutex); 5013 5014 intel_dp = dev_priv->drrs.dp; 5015 if (!intel_dp) { 5016 mutex_unlock(&dev_priv->drrs.mutex); 5017 return; 5018 } 5019 5020 crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 5021 pipe = to_intel_crtc(crtc)->pipe; 5022 5023 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 5024 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 5025 5026 /* invalidate means busy screen hence upclock */ 5027 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5028 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 5029 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); 5030 5031 mutex_unlock(&dev_priv->drrs.mutex); 5032 } 5033 5034 /** 5035 * intel_edp_drrs_flush - Restart Idleness DRRS 5036 * @dev_priv: i915 device 5037 * 
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Kill any pending downclock; it gets rescheduled below if idle. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Clear the finished planes from the busy mask for the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means the screen just changed, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario.
 * This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenario.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation. When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are set up unconditionally, even if DRRS ends up unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	/* DRRS requires display version 7 or newer. */
	if (DISPLAY_VER(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	/* The EDID must advertise a lower-refresh variant of the fixed mode. */
	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	/* Panels come up at the high (fixed-mode) refresh rate. */
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

/*
 * One-time eDP connector setup: PPS init, DPCD/EDID caching, fixed and
 * downclock mode discovery, and backlight initialization. Returns false
 * if the port should not be registered as eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	struct edid *edid;

	/* Non-eDP ports need none of this; report success. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_pps_init(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
		} else {
			/* EDID present but yielded no modes: record as invalid. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* May be a real EDID or an ERR_PTR() sentinel for later consumers. */
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* multiply the mode clock and horizontal timings for MSO */
	intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
	intel_edp_mso_mode_fixup(intel_connector, downclock_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_pps_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		/* Apply any VBT-specified panel rotation quirk. */
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay,
							       fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}

/*
 * Work item scheduled after link-training failure: flags the connector's
 * link status as BAD and asks userspace to reprobe/modeset.
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

/*
 * Top-level DP/eDP connector registration for a digital port. Returns
 * false (after cleaning up) if the connector cannot be registered.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	/* DDI platforms read connector state through the DDI path. */
	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp,
				connector);

	/* HDCP applies to external DP sinks only, never to eDP panels. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	/* No HDMI FRL training has happened yet on this port. */
	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

/**
 * intel_dp_mst_suspend - Suspend all active MST topology managers
 * @dev_priv: i915 device
 *
 * Walks every DDI encoder and suspends the MST topology manager on those
 * ports that currently have MST active.
 */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		/* MST only exists behind DDI encoders. */
		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/**
 * intel_dp_mst_resume - Resume MST topology managers
 * @dev_priv: i915 device
 *
 * Walks every MST-capable DDI encoder and resumes its topology manager;
 * on failure the port is demoted to non-MST operation.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			/* Topology gone while suspended: fall back to SST. */
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}