1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/slab.h> 32 #include <linux/string_helpers.h> 33 #include <linux/timekeeping.h> 34 #include <linux/types.h> 35 36 #include <asm/byteorder.h> 37 38 #include <drm/display/drm_dp_helper.h> 39 #include <drm/display/drm_dsc_helper.h> 40 #include <drm/display/drm_hdmi_helper.h> 41 #include <drm/drm_atomic_helper.h> 42 #include <drm/drm_crtc.h> 43 #include <drm/drm_edid.h> 44 #include <drm/drm_probe_helper.h> 45 46 #include "g4x_dp.h" 47 #include "i915_debugfs.h" 48 #include "i915_drv.h" 49 #include "i915_reg.h" 50 #include "intel_atomic.h" 51 #include "intel_audio.h" 52 #include "intel_backlight.h" 53 #include "intel_combo_phy_regs.h" 54 #include "intel_connector.h" 55 #include "intel_crtc.h" 56 #include "intel_ddi.h" 57 #include "intel_de.h" 58 #include "intel_display_types.h" 59 #include "intel_dp.h" 60 #include "intel_dp_aux.h" 61 #include "intel_dp_hdcp.h" 62 #include "intel_dp_link_training.h" 63 #include "intel_dp_mst.h" 64 #include "intel_dpio_phy.h" 65 #include "intel_dpll.h" 66 #include "intel_fifo_underrun.h" 67 #include "intel_hdcp.h" 68 #include "intel_hdmi.h" 69 #include "intel_hotplug.h" 70 #include "intel_lspcon.h" 71 #include "intel_lvds.h" 72 #include "intel_panel.h" 73 #include "intel_pch_display.h" 74 #include "intel_pps.h" 75 #include "intel_psr.h" 76 #include "intel_tc.h" 77 #include "intel_vdsc.h" 78 #include "intel_vrr.h" 79 80 /* DP DSC throughput values used for slice count calculations KPixels/s */ 81 #define DP_DSC_PEAK_PIXEL_RATE 2720000 82 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 83 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 84 85 /* DP DSC FEC Overhead factor = 1/(0.972261) */ 86 #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 87 88 /* Compliance test status bits */ 89 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 90 #define INTEL_DP_RESOLUTION_PREFERRED (1 << 
INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

/*
 * Constants for DP DSC configurations.
 * Table of valid compressed bits-per-pixel values; the DSC helpers below
 * snap a computed bpp to one of these (pre-XE_LPD platforms).
 */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 *
 * This function is not safe to use prior to encoder type being set.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
    struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

    return dig_port->base.type == INTEL_OUTPUT_EDP;
}

/* Forward declaration; defined later in this file. */
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* Is link rate UHBR and thus 128b/132b?
 */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
    /* UHBR10 is 1000000 (10 Gbps in 10 kbps units); anything >= that is 128b/132b */
    return crtc_state->port_clock >= 1000000;
}

/* Fall back to the single always-supported rate, RBR (1.62 Gbps). */
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
    intel_dp->sink_rates[0] = 162000;
    intel_dp->num_sink_rates = 1;
}

/* update sink rates from dpcd */
static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
    static const int dp_rates[] = {
        162000, 270000, 540000, 810000
    };
    int i, max_rate;
    int max_lttpr_rate;

    if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
        /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
        static const int quirk_rates[] = { 162000, 270000, 324000 };

        memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
        intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

        return;
    }

    /*
     * Sink rates for 8b/10b.
     * A repeater (LTTPR) may further cap the max link rate below the
     * sink's own DPCD maximum.
     */
    max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
    max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
    if (max_lttpr_rate)
        max_rate = min(max_rate, max_lttpr_rate);

    for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
        if (dp_rates[i] > max_rate)
            break;
        intel_dp->sink_rates[i] = dp_rates[i];
    }

    /*
     * Sink rates for 128b/132b. If set, sink should support all 8b/10b
     * rates and 10 Gbps.
     */
    if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
        u8 uhbr_rates = 0;

        /* sink_rates[] must hold the 8b/10b rates plus up to 3 UHBR rates */
        BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

        drm_dp_dpcd_readb(&intel_dp->aux,
                          DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

        if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
            /* We have a repeater */
            if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
                intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
                                            DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
                DP_PHY_REPEATER_128B132B_SUPPORTED) {
                /* Repeater supports 128b/132b, valid UHBR rates */
                uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
                                                          DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
            } else {
                /* Does not support 128b/132b */
                uhbr_rates = 0;
            }
        }

        if (uhbr_rates & DP_UHBR10)
            intel_dp->sink_rates[i++] = 1000000;
        if (uhbr_rates & DP_UHBR13_5)
            intel_dp->sink_rates[i++] = 1350000;
        if (uhbr_rates & DP_UHBR20)
            intel_dp->sink_rates[i++] = 2000000;
    }

    intel_dp->num_sink_rates = i;
}

/*
 * Populate sink_rates[] from DPCD, falling back to the default (RBR only)
 * if the DPCD advertised no usable rates at all.
 */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
    struct intel_connector *connector = intel_dp->attached_connector;
    struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
    struct intel_encoder *encoder = &intel_dig_port->base;

    intel_dp_set_dpcd_sink_rates(intel_dp);

    if (intel_dp->num_sink_rates)
        return;

    drm_err(&dp_to_i915(intel_dp)->drm,
            "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
            connector->base.base.id, connector->base.name,
            encoder->base.base.id, encoder->base.name);

    intel_dp_set_default_sink_rates(intel_dp);
}

/* Fall back to a single lane, always supported. */
static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
    intel_dp->max_sink_lane_count = 1;
}

static void
intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) 229 { 230 struct intel_connector *connector = intel_dp->attached_connector; 231 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 232 struct intel_encoder *encoder = &intel_dig_port->base; 233 234 intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 235 236 switch (intel_dp->max_sink_lane_count) { 237 case 1: 238 case 2: 239 case 4: 240 return; 241 } 242 243 drm_err(&dp_to_i915(intel_dp)->drm, 244 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", 245 connector->base.base.id, connector->base.name, 246 encoder->base.base.id, encoder->base.name, 247 intel_dp->max_sink_lane_count); 248 249 intel_dp_set_default_max_sink_lane_count(intel_dp); 250 } 251 252 /* Get length of rates array potentially limited by max_rate. */ 253 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 254 { 255 int i; 256 257 /* Limit results by potentially reduced max rate */ 258 for (i = 0; i < len; i++) { 259 if (rates[len - i - 1] <= max_rate) 260 return len - i; 261 } 262 263 return 0; 264 } 265 266 /* Get length of common rates array potentially limited by max_rate. 
*/ 267 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 268 int max_rate) 269 { 270 return intel_dp_rate_limit_len(intel_dp->common_rates, 271 intel_dp->num_common_rates, max_rate); 272 } 273 274 static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) 275 { 276 if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, 277 index < 0 || index >= intel_dp->num_common_rates)) 278 return 162000; 279 280 return intel_dp->common_rates[index]; 281 } 282 283 /* Theoretical max between source and sink */ 284 static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 285 { 286 return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); 287 } 288 289 static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) 290 { 291 int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata); 292 int max_lanes = dig_port->max_lanes; 293 294 if (vbt_max_lanes) 295 max_lanes = min(max_lanes, vbt_max_lanes); 296 297 return max_lanes; 298 } 299 300 /* Theoretical max between source and sink */ 301 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 302 { 303 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 304 int source_max = intel_dp_max_source_lane_count(dig_port); 305 int sink_max = intel_dp->max_sink_lane_count; 306 int fia_max = intel_tc_port_fia_max_lane_count(dig_port); 307 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); 308 309 if (lttpr_max) 310 sink_max = min(sink_max, lttpr_max); 311 312 return min3(source_max, sink_max, fia_max); 313 } 314 315 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 316 { 317 switch (intel_dp->max_link_lane_count) { 318 case 1: 319 case 2: 320 case 4: 321 return intel_dp->max_link_lane_count; 322 default: 323 MISSING_CASE(intel_dp->max_link_lane_count); 324 return 1; 325 } 326 } 327 328 /* 329 * The required data bandwidth for a mode with given pixel clock and bpp. 
This 330 * is the required net bandwidth independent of the data bandwidth efficiency. 331 */ 332 int 333 intel_dp_link_required(int pixel_clock, int bpp) 334 { 335 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 336 return DIV_ROUND_UP(pixel_clock * bpp, 8); 337 } 338 339 /* 340 * Given a link rate and lanes, get the data bandwidth. 341 * 342 * Data bandwidth is the actual payload rate, which depends on the data 343 * bandwidth efficiency and the link rate. 344 * 345 * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency 346 * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = 347 * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by 348 * coincidence, the port clock in kHz matches the data bandwidth in kBps, and 349 * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no 350 * longer holds for data bandwidth as soon as FEC or MST is taken into account!) 351 * 352 * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For 353 * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 354 * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 355 * does not match the symbol clock, the port clock (not even if you think in 356 * terms of a byte clock), nor the data bandwidth. It only matches the link bit 357 * rate in units of 10000 bps. 358 */ 359 int 360 intel_dp_max_data_rate(int max_link_rate, int max_lanes) 361 { 362 if (max_link_rate >= 1000000) { 363 /* 364 * UHBR rates always use 128b/132b channel encoding, and have 365 * 97.71% data bandwidth efficiency. Consider max_link_rate the 366 * link bit rate in units of 10000 bps. 
367 */ 368 int max_link_rate_kbps = max_link_rate * 10; 369 370 max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000); 371 max_link_rate = max_link_rate_kbps / 8; 372 } 373 374 /* 375 * Lower than UHBR rates always use 8b/10b channel encoding, and have 376 * 80% data bandwidth efficiency for SST non-FEC. However, this turns 377 * out to be a nop by coincidence, and can be skipped: 378 * 379 * int max_link_rate_kbps = max_link_rate * 10; 380 * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10); 381 * max_link_rate = max_link_rate_kbps / 8; 382 */ 383 384 return max_link_rate * max_lanes; 385 } 386 387 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) 388 { 389 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 390 struct intel_encoder *encoder = &intel_dig_port->base; 391 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 392 393 return DISPLAY_VER(dev_priv) >= 12 || 394 (DISPLAY_VER(dev_priv) == 11 && 395 encoder->port != PORT_A); 396 } 397 398 static int dg2_max_source_rate(struct intel_dp *intel_dp) 399 { 400 return intel_dp_is_edp(intel_dp) ? 
810000 : 1350000; 401 } 402 403 static int icl_max_source_rate(struct intel_dp *intel_dp) 404 { 405 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 406 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 407 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 408 409 if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) 410 return 540000; 411 412 return 810000; 413 } 414 415 static int ehl_max_source_rate(struct intel_dp *intel_dp) 416 { 417 if (intel_dp_is_edp(intel_dp)) 418 return 540000; 419 420 return 810000; 421 } 422 423 static int vbt_max_link_rate(struct intel_dp *intel_dp) 424 { 425 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 426 int max_rate; 427 428 max_rate = intel_bios_dp_max_link_rate(encoder->devdata); 429 430 if (intel_dp_is_edp(intel_dp)) { 431 struct intel_connector *connector = intel_dp->attached_connector; 432 int edp_max_rate = connector->panel.vbt.edp.max_link_rate; 433 434 if (max_rate && edp_max_rate) 435 max_rate = min(max_rate, edp_max_rate); 436 else if (edp_max_rate) 437 max_rate = edp_max_rate; 438 } 439 440 return max_rate; 441 } 442 443 static void 444 intel_dp_set_source_rates(struct intel_dp *intel_dp) 445 { 446 /* The values must be in increasing order */ 447 static const int icl_rates[] = { 448 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, 449 1000000, 1350000, 450 }; 451 static const int bxt_rates[] = { 452 162000, 216000, 243000, 270000, 324000, 432000, 540000 453 }; 454 static const int skl_rates[] = { 455 162000, 216000, 270000, 324000, 432000, 540000 456 }; 457 static const int hsw_rates[] = { 458 162000, 270000, 540000 459 }; 460 static const int g4x_rates[] = { 461 162000, 270000 462 }; 463 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 464 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 465 const int *source_rates; 466 int size, max_rate = 0, vbt_max_rate; 467 468 /* This should only 
be done once */ 469 drm_WARN_ON(&dev_priv->drm, 470 intel_dp->source_rates || intel_dp->num_source_rates); 471 472 if (DISPLAY_VER(dev_priv) >= 11) { 473 source_rates = icl_rates; 474 size = ARRAY_SIZE(icl_rates); 475 if (IS_DG2(dev_priv)) 476 max_rate = dg2_max_source_rate(intel_dp); 477 else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || 478 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 479 max_rate = 810000; 480 else if (IS_JSL_EHL(dev_priv)) 481 max_rate = ehl_max_source_rate(intel_dp); 482 else 483 max_rate = icl_max_source_rate(intel_dp); 484 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 485 source_rates = bxt_rates; 486 size = ARRAY_SIZE(bxt_rates); 487 } else if (DISPLAY_VER(dev_priv) == 9) { 488 source_rates = skl_rates; 489 size = ARRAY_SIZE(skl_rates); 490 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 491 IS_BROADWELL(dev_priv)) { 492 source_rates = hsw_rates; 493 size = ARRAY_SIZE(hsw_rates); 494 } else { 495 source_rates = g4x_rates; 496 size = ARRAY_SIZE(g4x_rates); 497 } 498 499 vbt_max_rate = vbt_max_link_rate(intel_dp); 500 if (max_rate && vbt_max_rate) 501 max_rate = min(max_rate, vbt_max_rate); 502 else if (vbt_max_rate) 503 max_rate = vbt_max_rate; 504 505 if (max_rate) 506 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 507 508 intel_dp->source_rates = source_rates; 509 intel_dp->num_source_rates = size; 510 } 511 512 static int intersect_rates(const int *source_rates, int source_len, 513 const int *sink_rates, int sink_len, 514 int *common_rates) 515 { 516 int i = 0, j = 0, k = 0; 517 518 while (i < source_len && j < sink_len) { 519 if (source_rates[i] == sink_rates[j]) { 520 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 521 return k; 522 common_rates[k] = source_rates[i]; 523 ++k; 524 ++i; 525 ++j; 526 } else if (source_rates[i] < sink_rates[j]) { 527 ++i; 528 } else { 529 ++j; 530 } 531 } 532 return k; 533 } 534 535 /* return index of rate in rates array, or -1 if not found */ 536 static 
int intel_dp_rate_index(const int *rates, int len, int rate)
{
    int i;

    for (i = 0; i < len; i++)
        if (rate == rates[i])
            return i;

    return -1;
}

/* Compute common_rates[] as the intersection of source and sink rates. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);

    drm_WARN_ON(&i915->drm,
                !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

    intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
                                                 intel_dp->num_source_rates,
                                                 intel_dp->sink_rates,
                                                 intel_dp->num_sink_rates,
                                                 intel_dp->common_rates);

    /* Paranoia, there should always be something in common. */
    if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
        intel_dp->common_rates[0] = 162000;
        intel_dp->num_common_rates = 1;
    }
}

/* Are the given link rate and lane count within the current max limits? */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
                                       u8 lane_count)
{
    /*
     * FIXME: we need to synchronize the current link parameters with
     * hardware readout. Currently fast link training doesn't work on
     * boot-up.
     */
    if (link_rate == 0 ||
        link_rate > intel_dp->max_link_rate)
        return false;

    if (lane_count == 0 ||
        lane_count > intel_dp_max_lane_count(intel_dp))
        return false;

    return true;
}

/*
 * Would the given reduced link parameters still carry the panel's fixed
 * mode (assuming 18 bpp)? Used to decide if eDP link training may fall back.
 */
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
                                                     int link_rate,
                                                     u8 lane_count)
{
    /* FIXME figure out what we actually want here */
    const struct drm_display_mode *fixed_mode =
        intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
    int mode_rate, max_rate;

    mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
    max_rate = intel_dp_max_data_rate(link_rate, lane_count);
    if (mode_rate > max_rate)
        return false;

    return true;
}

/*
 * After a link training failure, pick reduced link parameters to retry
 * with: first drop the link rate, then halve the lane count.
 * Returns 0 on a new attempt being set up, -1 when out of options.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                                            int link_rate, u8 lane_count)
{
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    int index;

    /*
     * TODO: Enable fallback on MST links once MST link compute can handle
     * the fallback params.
     */
    if (intel_dp->is_mst) {
        drm_err(&i915->drm, "Link Training Unsuccessful\n");
        return -1;
    }

    if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
        drm_dbg_kms(&i915->drm,
                    "Retrying Link training for eDP with max parameters\n");
        intel_dp->use_max_params = true;
        return 0;
    }

    index = intel_dp_rate_index(intel_dp->common_rates,
                                intel_dp->num_common_rates,
                                link_rate);
    if (index > 0) {
        if (intel_dp_is_edp(intel_dp) &&
            !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                      intel_dp_common_rate(intel_dp, index - 1),
                                                      lane_count)) {
            drm_dbg_kms(&i915->drm,
                        "Retrying Link training for eDP with same parameters\n");
            return 0;
        }
        intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1);
        intel_dp->max_link_lane_count = lane_count;
    } else if (lane_count > 1) {
        if (intel_dp_is_edp(intel_dp) &&
            !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                      intel_dp_max_common_rate(intel_dp),
                                                      lane_count >> 1)) {
            drm_dbg_kms(&i915->drm,
                        "Retrying Link training for eDP with same parameters\n");
            return 0;
        }
        intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
        intel_dp->max_link_lane_count = lane_count >> 1;
    } else {
        drm_err(&i915->drm, "Link Training Unsuccessful\n");
        return -1;
    }

    return 0;
}

/* Scale the mode clock by the DSC FEC overhead factor (1/0.972261). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
    return div_u64(mul_u32_u32(mode_clock, 1000000U),
                   DP_DSC_FEC_OVERHEAD_FACTOR);
}

/* Small joiner RAM size per pipe, in bits, by display generation. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
    if (DISPLAY_VER(i915) >= 13)
        return 17280 * 8;
    else if (DISPLAY_VER(i915) >= 11)
        return 7680 * 8;
    else
        return 6144 * 8;
}

/*
 * Snap a computed compressed bpp to a value the hardware/VESA tables
 * support; returns 0 if bpp is below the smallest valid value.
 */
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
    u32 bits_per_pixel = bpp;
    int i;

    /* Error out if the max bpp is less than smallest allowed valid bpp */
    if (bits_per_pixel < valid_dsc_bpp[0]) {
        drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
                    bits_per_pixel, valid_dsc_bpp[0]);
        return 0;
    }

    /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
    if (DISPLAY_VER(i915) >= 13) {
        bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
    } else {
        /* Find the nearest match in the array of known BPPs from VESA */
        for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
            if (bits_per_pixel < valid_dsc_bpp[i + 1])
                break;
        }
        drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
                    bits_per_pixel, valid_dsc_bpp[i]);

        bits_per_pixel = valid_dsc_bpp[i];
    }

    return bits_per_pixel;
}

/*
 * Compute the max compressed output bpp for the given link/mode, limited
 * by link bandwidth, small joiner RAM and (if bigjoiner) CD clock.
 * Returned value is in U6.4 fixed point format.
 */
u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
                                u32 link_clock, u32 lane_count,
                                u32 mode_clock, u32 mode_hdisplay,
                                bool bigjoiner,
                                u32 pipe_bpp,
                                u32 timeslots)
{
    u32 bits_per_pixel, max_bpp_small_joiner_ram;

    /*
     * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
     * (LinkSymbolClock)* 8 * (TimeSlots / 64)
     * for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
     * for MST -> TimeSlots has to be calculated, based on mode requirements
     */
    bits_per_pixel = DIV_ROUND_UP((link_clock * lane_count) * timeslots,
                                  intel_dp_mode_to_fec_clock(mode_clock) * 8);

    drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
                "total bw %u pixel clock %u\n",
                bits_per_pixel, timeslots,
                (link_clock * lane_count * 8),
                intel_dp_mode_to_fec_clock(mode_clock));

    /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
    max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
        mode_hdisplay;

    /* With bigjoiner each pipe handles half the width, doubling the budget */
    if (bigjoiner)
        max_bpp_small_joiner_ram *= 2;

    /*
     * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
     * check, output bpp from small joiner RAM check)
     */
    bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

    if (bigjoiner) {
        u32 max_bpp_bigjoiner =
            i915->display.cdclk.max_cdclk_freq * 48 /
            intel_dp_mode_to_fec_clock(mode_clock);

        bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
    }

    bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);

    /*
     * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
     * fractional part is 0
     */
    return bits_per_pixel << 4;
}

/*
 * Pick the smallest valid slice count satisfying the encoder throughput
 * and max slice width limits; returns 0 if no valid count exists.
 */
u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
                                int mode_clock, int mode_hdisplay,
                                bool bigjoiner)
{
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    u8 min_slice_count, i;
    int max_slice_width;

    if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
        min_slice_count = DIV_ROUND_UP(mode_clock,
                                       DP_DSC_MAX_ENC_THROUGHPUT_0);
    else
        min_slice_count = DIV_ROUND_UP(mode_clock,
                                       DP_DSC_MAX_ENC_THROUGHPUT_1);

    max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
    if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
        drm_dbg_kms(&i915->drm,
                    "Unsupported slice width %d by DP DSC Sink device\n",
                    max_slice_width);
        return 0;
    }
    /* Also take into account max slice width */
    min_slice_count = max_t(u8, min_slice_count,
                            DIV_ROUND_UP(mode_hdisplay,
                                         max_slice_width));

    /* Find the closest match to the valid slice count values */
    for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
        u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

        if (test_slice_count >
            drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
            break;

        /* big joiner needs small joiner to be enabled */
        if (bigjoiner && test_slice_count < 4)
            continue;

        if (min_slice_count <= test_slice_count)
            return test_slice_count;
    }

    drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
                min_slice_count);
    return 0;
}

/*
 * Choose the output format for the given connector/mode: RGB unless the
 * sink requires (or the mode only supports) YCbCr 4:2:0, in which case the
 * DFP's conversion capabilities decide what we send on the wire.
 */
static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
                       bool ycbcr_420_output)
{
    struct intel_dp *intel_dp = intel_attached_dp(connector);

    if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output)
        return INTEL_OUTPUT_FORMAT_RGB;

    /* DFP can convert RGB->YCbCr and 4:4:4->4:2:0: send RGB */
    if (intel_dp->dfp.rgb_to_ycbcr &&
        intel_dp->dfp.ycbcr_444_to_420)
        return INTEL_OUTPUT_FORMAT_RGB;

    if (intel_dp->dfp.ycbcr_444_to_420)
        return INTEL_OUTPUT_FORMAT_YCBCR444;
    else
        return INTEL_OUTPUT_FORMAT_YCBCR420;
}

/* Minimum bpp for the given output format: 6 bpc for RGB, 8 bpc otherwise. */
int intel_dp_min_bpp(enum intel_output_format output_format)
{
    if (output_format == INTEL_OUTPUT_FORMAT_RGB)
        return 6 * 3;
    else
        return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
    /*
     * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
     * format of the number of bytes per pixel will be half the number
     * of bytes of RGB pixel.
     */
    if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
        bpp /= 2;

    return bpp;
}

/* Minimum on-the-wire bpp for the mode, given the chosen output format. */
static int
intel_dp_mode_min_output_bpp(struct intel_connector *connector,
                             const struct drm_display_mode *mode)
{
    const struct drm_display_info *info = &connector->base.display_info;
    enum intel_output_format output_format =
        intel_dp_output_format(connector, drm_mode_is_420_only(info, mode));

    return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
                                  int hdisplay)
{
    /*
     * Older platforms don't like hdisplay==4096 with DP.
     *
     * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
     * and frame counter increment), but we don't get vblank interrupts,
     * and the pipe underruns immediately. The link also doesn't seem
     * to get trained properly.
     *
     * On CHV the vblank interrupts don't seem to disappear but
     * otherwise the symptoms are similar.
     *
     * TODO: confirm the behaviour on HSW+
     */
    return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

/* Effective max TMDS clock: source DFP limit capped by sink's EDID limit. */
static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
{
    struct intel_connector *connector = intel_dp->attached_connector;
    const struct drm_display_info *info = &connector->base.display_info;
    int max_tmds_clock = intel_dp->dfp.max_tmds_clock;

    /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */
    if (max_tmds_clock && info->max_tmds_clock)
        max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);

    return max_tmds_clock;
}

/*
 * Check the mode's TMDS clock against the DP++/HDMI/DVI DFP limits;
 * a zero min/max limit means "no limit known" and is skipped.
 */
static enum drm_mode_status
intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
                          int clock, int bpc, bool ycbcr420_output,
                          bool respect_downstream_limits)
{
    int tmds_clock, min_tmds_clock, max_tmds_clock;

    if (!respect_downstream_limits)
        return MODE_OK;

    tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);

    min_tmds_clock = intel_dp->dfp.min_tmds_clock;
    max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);

    if (min_tmds_clock && tmds_clock < min_tmds_clock)
        return MODE_CLOCK_LOW;

    if (max_tmds_clock && tmds_clock > max_tmds_clock)
        return MODE_CLOCK_HIGH;

    return MODE_OK;
}

/* Validate the mode against downstream facing port (DFP) constraints. */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
                               const struct drm_display_mode *mode,
                               int target_clock)
{
    struct intel_dp *intel_dp = intel_attached_dp(connector);
    const struct drm_display_info *info = &connector->base.display_info;
    enum drm_mode_status status;
    bool ycbcr_420_only;

    /* If PCON supports FRL MODE, check FRL bandwidth constraints */
    if (intel_dp->dfp.pcon_max_frl_bw) {
        int target_bw;
        int max_frl_bw;
        int bpp = intel_dp_mode_min_output_bpp(connector, mode);

        target_bw = bpp * target_clock;

        max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

        /* converting bw from Gbps to Kbps*/
        max_frl_bw = max_frl_bw * 1000000;

        if (target_bw > max_frl_bw)
            return MODE_CLOCK_HIGH;

        return MODE_OK;
    }

    if (intel_dp->dfp.max_dotclock &&
        target_clock > intel_dp->dfp.max_dotclock)
        return MODE_CLOCK_HIGH;

    ycbcr_420_only = drm_mode_is_420_only(info, mode);

    /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
    status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
                                       8, ycbcr_420_only, true);

    if (status != MODE_OK) {
        /* Retry as 4:2:0 only if the mode also supports it */
        if (ycbcr_420_only ||
            !connector->base.ycbcr_420_allowed ||
            !drm_mode_is_420_also(info, mode))
            return status;

        status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
                                           8, true, true);
        if (status != MODE_OK)
            return status;
    }

    return MODE_OK;
}

/* Does this mode exceed single-pipe limits and thus need the bigjoiner? */
bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
                             int hdisplay, int clock)
{
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);

    if (!intel_dp_can_bigjoiner(intel_dp))
        return false;

    return clock > i915->max_dotclk_freq || hdisplay > 5120;
}

/*
 * drm_connector_funcs::mode_valid hook: validate the mode against dotclock,
 * link bandwidth (possibly DSC-compressed), bigjoiner and downstream limits.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
                    struct drm_display_mode *mode)
{
    struct intel_connector *connector = to_intel_connector(_connector);
    struct intel_dp *intel_dp = intel_attached_dp(connector);
    struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    const struct drm_display_mode *fixed_mode;
    int target_clock = mode->clock;
    int max_rate, mode_rate, max_lanes, max_link_clock;
    int max_dotclk = dev_priv->max_dotclk_freq;
    u16 dsc_max_output_bpp = 0;
    u8 dsc_slice_count = 0;
    enum drm_mode_status status;
    bool dsc = false, bigjoiner = false;

    if (mode->flags & DRM_MODE_FLAG_DBLCLK)
        return MODE_H_ILLEGAL;

    fixed_mode = intel_panel_fixed_mode(connector, mode);
    if (intel_dp_is_edp(intel_dp) && fixed_mode) {
        status = intel_panel_mode_valid(connector, mode);
        if (status != MODE_OK)
            return status;

        target_clock = fixed_mode->clock;
    }

    if (mode->clock < 10000)
        return MODE_CLOCK_LOW;

    if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
        bigjoiner = true;
        max_dotclk *= 2;
    }
    if (target_clock > max_dotclk)
        return MODE_CLOCK_HIGH;

    max_link_clock = intel_dp_max_link_rate(intel_dp);
    max_lanes = intel_dp_max_lane_count(intel_dp);

    max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
    mode_rate = intel_dp_link_required(target_clock,
                                       intel_dp_mode_min_output_bpp(connector, mode));

    if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
        return MODE_H_ILLEGAL;

    /*
     * Output bpp is stored in 6.4 format so right shift by 4 to get the
     * integer value since we support only integer values of bpp.
     */
    if (HAS_DSC(dev_priv) &&
        drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
        /*
         * TBD pass the connector BPC,
         * for now U8_MAX so that max BPC on that platform would be picked
         */
        int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);

        if (intel_dp_is_edp(intel_dp)) {
            dsc_max_output_bpp =
                drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
            dsc_slice_count =
                drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                true);
        } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
            dsc_max_output_bpp =
                intel_dp_dsc_get_output_bpp(dev_priv,
                                            max_link_clock,
                                            max_lanes,
                                            target_clock,
                                            mode->hdisplay,
                                            bigjoiner,
                                            pipe_bpp, 64) >> 4;
            dsc_slice_count =
                intel_dp_dsc_get_slice_count(intel_dp,
                                             target_clock,
                                             mode->hdisplay,
                                             bigjoiner);
        }

        dsc = dsc_max_output_bpp && dsc_slice_count;
    }

    /*
     * Big joiner configuration needs DSC for TGL which is not true for
     * XE_LPD where uncompressed joiner is supported.
     */
    if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
        return MODE_CLOCK_HIGH;

    if (mode_rate > max_rate && !dsc)
        return MODE_CLOCK_HIGH;

    status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
    if (status != MODE_OK)
        return status;

    return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}

bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
{
    return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
}

bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
{
    return DISPLAY_VER(i915) >= 10;
}

/*
 * Format an int array as a comma separated list into str; silently
 * truncates once the buffer is exhausted (snprintf return >= remaining len).
 */
static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
    int i;

    str[0] = '\0';

    for (i = 0; i < nelem; i++) {
        int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
        if (r >= len)
            return;
        str += r;
        len -= r;
    }
}

/* Dump source/sink/common rate tables to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    char str[128]; /* FIXME: too big for stack? */

    if (!drm_debug_enabled(DRM_UT_KMS))
        return;

    snprintf_int_array(str, sizeof(str),
                       intel_dp->source_rates, intel_dp->num_source_rates);
    drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

    snprintf_int_array(str, sizeof(str),
                       intel_dp->sink_rates, intel_dp->num_sink_rates);
    drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

    snprintf_int_array(str, sizeof(str),
                       intel_dp->common_rates, intel_dp->num_common_rates);
    drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

/* Highest common rate not exceeding the current max_link_rate limit. */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
    int len;

    len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);

    return intel_dp_common_rate(intel_dp, len - 1);
}

/* Index of rate in the sink rate table (eDP 1.4 rate select); 0 on mismatch. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
    struct drm_i915_private *i915 = dp_to_i915(intel_dp);
    int i = intel_dp_rate_index(intel_dp->sink_rates,
                                intel_dp->num_sink_rates, rate);

    if (drm_WARN_ON(&i915->drm, i < 0))
        i = 0;

    return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                           u8 *link_bw, u8 *rate_select)
{
    /* eDP 1.4 rate select method.
*/ 1154 if (intel_dp->use_rate_select) { 1155 *link_bw = 0; 1156 *rate_select = 1157 intel_dp_rate_select(intel_dp, port_clock); 1158 } else { 1159 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1160 *rate_select = 0; 1161 } 1162 } 1163 1164 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1165 const struct intel_crtc_state *pipe_config) 1166 { 1167 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1168 1169 /* On TGL, FEC is supported on all Pipes */ 1170 if (DISPLAY_VER(dev_priv) >= 12) 1171 return true; 1172 1173 if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A) 1174 return true; 1175 1176 return false; 1177 } 1178 1179 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1180 const struct intel_crtc_state *pipe_config) 1181 { 1182 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1183 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1184 } 1185 1186 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1187 const struct intel_crtc_state *crtc_state) 1188 { 1189 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) 1190 return false; 1191 1192 return intel_dsc_source_support(crtc_state) && 1193 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1194 } 1195 1196 static bool intel_dp_is_ycbcr420(struct intel_dp *intel_dp, 1197 const struct intel_crtc_state *crtc_state) 1198 { 1199 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 1200 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 1201 intel_dp->dfp.ycbcr_444_to_420); 1202 } 1203 1204 static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp, 1205 const struct intel_crtc_state *crtc_state, 1206 int bpc, bool respect_downstream_limits) 1207 { 1208 bool ycbcr420_output = intel_dp_is_ycbcr420(intel_dp, crtc_state); 1209 int clock = crtc_state->hw.adjusted_mode.crtc_clock; 1210 1211 /* 1212 * Current bpc could already be below 8bpc due to 1213 * FDI bandwidth constraints 
or other limits. 1214 * HDMI minimum is 8bpc however. 1215 */ 1216 bpc = max(bpc, 8); 1217 1218 /* 1219 * We will never exceed downstream TMDS clock limits while 1220 * attempting deep color. If the user insists on forcing an 1221 * out of spec mode they will have to be satisfied with 8bpc. 1222 */ 1223 if (!respect_downstream_limits) 1224 bpc = 8; 1225 1226 for (; bpc >= 8; bpc -= 2) { 1227 if (intel_hdmi_bpc_possible(crtc_state, bpc, 1228 intel_dp->has_hdmi_sink, ycbcr420_output) && 1229 intel_dp_tmds_clock_valid(intel_dp, clock, bpc, ycbcr420_output, 1230 respect_downstream_limits) == MODE_OK) 1231 return bpc; 1232 } 1233 1234 return -EINVAL; 1235 } 1236 1237 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 1238 const struct intel_crtc_state *crtc_state, 1239 bool respect_downstream_limits) 1240 { 1241 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1242 struct intel_connector *intel_connector = intel_dp->attached_connector; 1243 int bpp, bpc; 1244 1245 bpc = crtc_state->pipe_bpp / 3; 1246 1247 if (intel_dp->dfp.max_bpc) 1248 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 1249 1250 if (intel_dp->dfp.min_tmds_clock) { 1251 int max_hdmi_bpc; 1252 1253 max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc, 1254 respect_downstream_limits); 1255 if (max_hdmi_bpc < 0) 1256 return 0; 1257 1258 bpc = min(bpc, max_hdmi_bpc); 1259 } 1260 1261 bpp = bpc * 3; 1262 if (intel_dp_is_edp(intel_dp)) { 1263 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1264 if (intel_connector->base.display_info.bpc == 0 && 1265 intel_connector->panel.vbt.edp.bpp && 1266 intel_connector->panel.vbt.edp.bpp < bpp) { 1267 drm_dbg_kms(&dev_priv->drm, 1268 "clamping bpp for eDP panel to BIOS-provided %i\n", 1269 intel_connector->panel.vbt.edp.bpp); 1270 bpp = intel_connector->panel.vbt.edp.bpp; 1271 } 1272 } 1273 1274 return bpp; 1275 } 1276 1277 /* Adjust link config limits based on compliance test requests. 
*/ 1278 void 1279 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1280 struct intel_crtc_state *pipe_config, 1281 struct link_config_limits *limits) 1282 { 1283 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1284 1285 /* For DP Compliance we override the computed bpp for the pipe */ 1286 if (intel_dp->compliance.test_data.bpc != 0) { 1287 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1288 1289 limits->min_bpp = limits->max_bpp = bpp; 1290 pipe_config->dither_force_disable = bpp == 6 * 3; 1291 1292 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1293 } 1294 1295 /* Use values requested by Compliance Test Request */ 1296 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1297 int index; 1298 1299 /* Validate the compliance test data since max values 1300 * might have changed due to link train fallback. 1301 */ 1302 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1303 intel_dp->compliance.test_lane_count)) { 1304 index = intel_dp_rate_index(intel_dp->common_rates, 1305 intel_dp->num_common_rates, 1306 intel_dp->compliance.test_link_rate); 1307 if (index >= 0) 1308 limits->min_rate = limits->max_rate = 1309 intel_dp->compliance.test_link_rate; 1310 limits->min_lane_count = limits->max_lane_count = 1311 intel_dp->compliance.test_lane_count; 1312 } 1313 } 1314 } 1315 1316 static bool has_seamless_m_n(struct intel_connector *connector) 1317 { 1318 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1319 1320 /* 1321 * Seamless M/N reprogramming only implemented 1322 * for BDW+ double buffered M/N registers so far. 
1323 */ 1324 return HAS_DOUBLE_BUFFERED_M_N(i915) && 1325 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; 1326 } 1327 1328 static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state, 1329 const struct drm_connector_state *conn_state) 1330 { 1331 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1332 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1333 1334 /* FIXME a bit of a mess wrt clock vs. crtc_clock */ 1335 if (has_seamless_m_n(connector)) 1336 return intel_panel_highest_mode(connector, adjusted_mode)->clock; 1337 else 1338 return adjusted_mode->crtc_clock; 1339 } 1340 1341 /* Optimize link config in order: max bpp, min clock, min lanes */ 1342 static int 1343 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 1344 struct intel_crtc_state *pipe_config, 1345 const struct drm_connector_state *conn_state, 1346 const struct link_config_limits *limits) 1347 { 1348 int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); 1349 int mode_rate, link_rate, link_avail; 1350 1351 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 1352 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1353 1354 mode_rate = intel_dp_link_required(clock, output_bpp); 1355 1356 for (i = 0; i < intel_dp->num_common_rates; i++) { 1357 link_rate = intel_dp_common_rate(intel_dp, i); 1358 if (link_rate < limits->min_rate || 1359 link_rate > limits->max_rate) 1360 continue; 1361 1362 for (lane_count = limits->min_lane_count; 1363 lane_count <= limits->max_lane_count; 1364 lane_count <<= 1) { 1365 link_avail = intel_dp_max_data_rate(link_rate, 1366 lane_count); 1367 1368 if (mode_rate <= link_avail) { 1369 pipe_config->lane_count = lane_count; 1370 pipe_config->pipe_bpp = bpp; 1371 pipe_config->port_clock = link_rate; 1372 1373 return 0; 1374 } 1375 } 1376 } 1377 } 1378 1379 return -EINVAL; 1380 } 1381 1382 int intel_dp_dsc_compute_bpp(struct intel_dp 
*intel_dp, u8 max_req_bpc) 1383 { 1384 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1385 int i, num_bpc; 1386 u8 dsc_bpc[3] = {0}; 1387 u8 dsc_max_bpc; 1388 1389 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1390 if (DISPLAY_VER(i915) >= 12) 1391 dsc_max_bpc = min_t(u8, 12, max_req_bpc); 1392 else 1393 dsc_max_bpc = min_t(u8, 10, max_req_bpc); 1394 1395 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 1396 dsc_bpc); 1397 for (i = 0; i < num_bpc; i++) { 1398 if (dsc_max_bpc >= dsc_bpc[i]) 1399 return dsc_bpc[i] * 3; 1400 } 1401 1402 return 0; 1403 } 1404 1405 static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp) 1406 { 1407 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1408 1409 return DISPLAY_VER(i915) >= 14 ? 2 : 1; 1410 } 1411 1412 static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp) 1413 { 1414 return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> 1415 DP_DSC_MINOR_SHIFT; 1416 } 1417 1418 static int intel_dp_get_slice_height(int vactive) 1419 { 1420 int slice_height; 1421 1422 /* 1423 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 1424 * lines is an optimal slice height, but any size can be used as long as 1425 * vertical active integer multiple and maximum vertical slice count 1426 * requirements are met. 1427 */ 1428 for (slice_height = 108; slice_height <= vactive; slice_height += 2) 1429 if (vactive % slice_height == 0) 1430 return slice_height; 1431 1432 /* 1433 * Highly unlikely we reach here as most of the resolutions will end up 1434 * finding appropriate slice_height in above loop but returning 1435 * slice_height as 2 here as it should work with all resolutions. 
1436 */ 1437 return 2; 1438 } 1439 1440 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 1441 struct intel_crtc_state *crtc_state) 1442 { 1443 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1444 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1445 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1446 u8 line_buf_depth; 1447 int ret; 1448 1449 /* 1450 * RC_MODEL_SIZE is currently a constant across all configurations. 1451 * 1452 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1453 * DP_DSC_RC_BUF_SIZE for this. 1454 */ 1455 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1456 vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; 1457 1458 vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); 1459 1460 ret = intel_dsc_compute_params(crtc_state); 1461 if (ret) 1462 return ret; 1463 1464 vdsc_cfg->dsc_version_major = 1465 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1466 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1467 vdsc_cfg->dsc_version_minor = 1468 min(intel_dp_source_dsc_version_minor(intel_dp), 1469 intel_dp_sink_dsc_version_minor(intel_dp)); 1470 1471 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1472 DP_DSC_RGB; 1473 1474 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 1475 if (!line_buf_depth) { 1476 drm_dbg_kms(&i915->drm, 1477 "DSC Sink Line Buffer Depth invalid\n"); 1478 return -EINVAL; 1479 } 1480 1481 if (vdsc_cfg->dsc_version_minor == 2) 1482 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 1483 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 1484 else 1485 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 
1486 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 1487 1488 vdsc_cfg->block_pred_enable = 1489 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1490 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1491 1492 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1493 } 1494 1495 int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 1496 struct intel_crtc_state *pipe_config, 1497 struct drm_connector_state *conn_state, 1498 struct link_config_limits *limits, 1499 int timeslots, 1500 bool compute_pipe_bpp) 1501 { 1502 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1503 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 1504 const struct drm_display_mode *adjusted_mode = 1505 &pipe_config->hw.adjusted_mode; 1506 int pipe_bpp; 1507 int ret; 1508 1509 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 1510 intel_dp_supports_fec(intel_dp, pipe_config); 1511 1512 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 1513 return -EINVAL; 1514 1515 if (compute_pipe_bpp) 1516 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc); 1517 else 1518 pipe_bpp = pipe_config->pipe_bpp; 1519 1520 if (intel_dp->force_dsc_bpc) { 1521 pipe_bpp = intel_dp->force_dsc_bpc * 3; 1522 drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d", pipe_bpp); 1523 } 1524 1525 /* Min Input BPC for ICL+ is 8 */ 1526 if (pipe_bpp < 8 * 3) { 1527 drm_dbg_kms(&dev_priv->drm, 1528 "No DSC support for less than 8bpc\n"); 1529 return -EINVAL; 1530 } 1531 1532 /* 1533 * For now enable DSC for max bpp, max link rate, max lane count. 1534 * Optimize this later for the minimum possible link rate/lane count 1535 * with DSC enabled for the requested mode. 
1536 */ 1537 pipe_config->pipe_bpp = pipe_bpp; 1538 pipe_config->port_clock = limits->max_rate; 1539 pipe_config->lane_count = limits->max_lane_count; 1540 1541 if (intel_dp_is_edp(intel_dp)) { 1542 pipe_config->dsc.compressed_bpp = 1543 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 1544 pipe_config->pipe_bpp); 1545 pipe_config->dsc.slice_count = 1546 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 1547 true); 1548 } else { 1549 u16 dsc_max_output_bpp = 0; 1550 u8 dsc_dp_slice_count; 1551 1552 if (compute_pipe_bpp) { 1553 dsc_max_output_bpp = 1554 intel_dp_dsc_get_output_bpp(dev_priv, 1555 pipe_config->port_clock, 1556 pipe_config->lane_count, 1557 adjusted_mode->crtc_clock, 1558 adjusted_mode->crtc_hdisplay, 1559 pipe_config->bigjoiner_pipes, 1560 pipe_bpp, 1561 timeslots); 1562 if (!dsc_max_output_bpp) { 1563 drm_dbg_kms(&dev_priv->drm, 1564 "Compressed BPP not supported\n"); 1565 return -EINVAL; 1566 } 1567 } 1568 dsc_dp_slice_count = 1569 intel_dp_dsc_get_slice_count(intel_dp, 1570 adjusted_mode->crtc_clock, 1571 adjusted_mode->crtc_hdisplay, 1572 pipe_config->bigjoiner_pipes); 1573 if (!dsc_dp_slice_count) { 1574 drm_dbg_kms(&dev_priv->drm, 1575 "Compressed Slice Count not supported\n"); 1576 return -EINVAL; 1577 } 1578 1579 /* 1580 * compute pipe bpp is set to false for DP MST DSC case 1581 * and compressed_bpp is calculated same time once 1582 * vpci timeslots are allocated, because overall bpp 1583 * calculation procedure is bit different for MST case. 
1584 */ 1585 if (compute_pipe_bpp) { 1586 pipe_config->dsc.compressed_bpp = min_t(u16, 1587 dsc_max_output_bpp >> 4, 1588 pipe_config->pipe_bpp); 1589 } 1590 pipe_config->dsc.slice_count = dsc_dp_slice_count; 1591 drm_dbg_kms(&dev_priv->drm, "DSC: compressed bpp %d slice count %d\n", 1592 pipe_config->dsc.compressed_bpp, 1593 pipe_config->dsc.slice_count); 1594 } 1595 /* 1596 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 1597 * is greater than the maximum Cdclock and if slice count is even 1598 * then we need to use 2 VDSC instances. 1599 */ 1600 if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq || 1601 pipe_config->bigjoiner_pipes) { 1602 if (pipe_config->dsc.slice_count > 1) { 1603 pipe_config->dsc.dsc_split = true; 1604 } else { 1605 drm_dbg_kms(&dev_priv->drm, 1606 "Cannot split stream to use 2 VDSC instances\n"); 1607 return -EINVAL; 1608 } 1609 } 1610 1611 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 1612 if (ret < 0) { 1613 drm_dbg_kms(&dev_priv->drm, 1614 "Cannot compute valid DSC parameters for Input Bpp = %d " 1615 "Compressed BPP = %d\n", 1616 pipe_config->pipe_bpp, 1617 pipe_config->dsc.compressed_bpp); 1618 return ret; 1619 } 1620 1621 pipe_config->dsc.compression_enable = true; 1622 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 1623 "Compressed Bpp = %d Slice Count = %d\n", 1624 pipe_config->pipe_bpp, 1625 pipe_config->dsc.compressed_bpp, 1626 pipe_config->dsc.slice_count); 1627 1628 return 0; 1629 } 1630 1631 static int 1632 intel_dp_compute_link_config(struct intel_encoder *encoder, 1633 struct intel_crtc_state *pipe_config, 1634 struct drm_connector_state *conn_state, 1635 bool respect_downstream_limits) 1636 { 1637 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1638 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 1639 const struct drm_display_mode *adjusted_mode = 1640 &pipe_config->hw.adjusted_mode; 1641 struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); 1642 struct link_config_limits limits; 1643 bool joiner_needs_dsc = false; 1644 int ret; 1645 1646 limits.min_rate = intel_dp_common_rate(intel_dp, 0); 1647 limits.max_rate = intel_dp_max_link_rate(intel_dp); 1648 1649 limits.min_lane_count = 1; 1650 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 1651 1652 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); 1653 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config, respect_downstream_limits); 1654 1655 if (intel_dp->use_max_params) { 1656 /* 1657 * Use the maximum clock and number of lanes the eDP panel 1658 * advertizes being capable of in case the initial fast 1659 * optimal params failed us. The panels are generally 1660 * designed to support only a single clock and lane 1661 * configuration, and typically on older panels these 1662 * values correspond to the native resolution of the panel. 1663 */ 1664 limits.min_lane_count = limits.max_lane_count; 1665 limits.min_rate = limits.max_rate; 1666 } 1667 1668 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 1669 1670 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 1671 "max rate %d max bpp %d pixel clock %iKHz\n", 1672 limits.max_lane_count, limits.max_rate, 1673 limits.max_bpp, adjusted_mode->crtc_clock); 1674 1675 if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay, 1676 adjusted_mode->crtc_clock)) 1677 pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); 1678 1679 /* 1680 * Pipe joiner needs compression up to display 12 due to bandwidth 1681 * limitation. DG2 onwards pipe joiner can be enabled without 1682 * compression. 1683 */ 1684 joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes; 1685 1686 /* 1687 * Optimize for slow and wide for everything, because there are some 1688 * eDP 1.3 and 1.4 panels don't work well with fast and narrow. 
1689 */ 1690 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits); 1691 1692 if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) { 1693 drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n", 1694 str_yes_no(ret), str_yes_no(joiner_needs_dsc), 1695 str_yes_no(intel_dp->force_dsc_en)); 1696 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 1697 conn_state, &limits, 64, true); 1698 if (ret < 0) 1699 return ret; 1700 } 1701 1702 if (pipe_config->dsc.compression_enable) { 1703 drm_dbg_kms(&i915->drm, 1704 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 1705 pipe_config->lane_count, pipe_config->port_clock, 1706 pipe_config->pipe_bpp, 1707 pipe_config->dsc.compressed_bpp); 1708 1709 drm_dbg_kms(&i915->drm, 1710 "DP link rate required %i available %i\n", 1711 intel_dp_link_required(adjusted_mode->crtc_clock, 1712 pipe_config->dsc.compressed_bpp), 1713 intel_dp_max_data_rate(pipe_config->port_clock, 1714 pipe_config->lane_count)); 1715 } else { 1716 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 1717 pipe_config->lane_count, pipe_config->port_clock, 1718 pipe_config->pipe_bpp); 1719 1720 drm_dbg_kms(&i915->drm, 1721 "DP link rate required %i available %i\n", 1722 intel_dp_link_required(adjusted_mode->crtc_clock, 1723 pipe_config->pipe_bpp), 1724 intel_dp_max_data_rate(pipe_config->port_clock, 1725 pipe_config->lane_count)); 1726 } 1727 return 0; 1728 } 1729 1730 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 1731 const struct drm_connector_state *conn_state) 1732 { 1733 const struct intel_digital_connector_state *intel_conn_state = 1734 to_intel_digital_connector_state(conn_state); 1735 const struct drm_display_mode *adjusted_mode = 1736 &crtc_state->hw.adjusted_mode; 1737 1738 /* 1739 * Our YCbCr output is always limited range. 
1740 * crtc_state->limited_color_range only applies to RGB, 1741 * and it must never be set for YCbCr or we risk setting 1742 * some conflicting bits in TRANSCONF which will mess up 1743 * the colors on the monitor. 1744 */ 1745 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 1746 return false; 1747 1748 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1749 /* 1750 * See: 1751 * CEA-861-E - 5.1 Default Encoding Parameters 1752 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1753 */ 1754 return crtc_state->pipe_bpp != 18 && 1755 drm_default_rgb_quant_range(adjusted_mode) == 1756 HDMI_QUANTIZATION_RANGE_LIMITED; 1757 } else { 1758 return intel_conn_state->broadcast_rgb == 1759 INTEL_BROADCAST_RGB_LIMITED; 1760 } 1761 } 1762 1763 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 1764 enum port port) 1765 { 1766 if (IS_G4X(dev_priv)) 1767 return false; 1768 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) 1769 return false; 1770 1771 return true; 1772 } 1773 1774 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 1775 const struct drm_connector_state *conn_state, 1776 struct drm_dp_vsc_sdp *vsc) 1777 { 1778 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1779 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1780 1781 /* 1782 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1783 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 1784 * Colorimetry Format indication. 
1785 */ 1786 vsc->revision = 0x5; 1787 vsc->length = 0x13; 1788 1789 /* DP 1.4a spec, Table 2-120 */ 1790 switch (crtc_state->output_format) { 1791 case INTEL_OUTPUT_FORMAT_YCBCR444: 1792 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 1793 break; 1794 case INTEL_OUTPUT_FORMAT_YCBCR420: 1795 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 1796 break; 1797 case INTEL_OUTPUT_FORMAT_RGB: 1798 default: 1799 vsc->pixelformat = DP_PIXELFORMAT_RGB; 1800 } 1801 1802 switch (conn_state->colorspace) { 1803 case DRM_MODE_COLORIMETRY_BT709_YCC: 1804 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1805 break; 1806 case DRM_MODE_COLORIMETRY_XVYCC_601: 1807 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 1808 break; 1809 case DRM_MODE_COLORIMETRY_XVYCC_709: 1810 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 1811 break; 1812 case DRM_MODE_COLORIMETRY_SYCC_601: 1813 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 1814 break; 1815 case DRM_MODE_COLORIMETRY_OPYCC_601: 1816 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 1817 break; 1818 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 1819 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 1820 break; 1821 case DRM_MODE_COLORIMETRY_BT2020_RGB: 1822 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 1823 break; 1824 case DRM_MODE_COLORIMETRY_BT2020_YCC: 1825 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 1826 break; 1827 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 1828 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 1829 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 1830 break; 1831 default: 1832 /* 1833 * RGB->YCBCR color conversion uses the BT.709 1834 * color space. 
1835 */ 1836 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1837 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1838 else 1839 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 1840 break; 1841 } 1842 1843 vsc->bpc = crtc_state->pipe_bpp / 3; 1844 1845 /* only RGB pixelformat supports 6 bpc */ 1846 drm_WARN_ON(&dev_priv->drm, 1847 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 1848 1849 /* all YCbCr are always limited range */ 1850 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 1851 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 1852 } 1853 1854 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 1855 struct intel_crtc_state *crtc_state, 1856 const struct drm_connector_state *conn_state) 1857 { 1858 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 1859 1860 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 1861 if (crtc_state->has_psr) 1862 return; 1863 1864 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 1865 return; 1866 1867 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 1868 vsc->sdp_type = DP_SDP_VSC; 1869 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1870 &crtc_state->infoframes.vsc); 1871 } 1872 1873 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 1874 const struct intel_crtc_state *crtc_state, 1875 const struct drm_connector_state *conn_state, 1876 struct drm_dp_vsc_sdp *vsc) 1877 { 1878 vsc->sdp_type = DP_SDP_VSC; 1879 1880 if (crtc_state->has_psr2) { 1881 if (intel_dp->psr.colorimetry_support && 1882 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 1883 /* [PSR2, +Colorimetry] */ 1884 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1885 vsc); 1886 } else { 1887 /* 1888 * [PSR2, -Colorimetry] 1889 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 1890 * 3D stereo + PSR/PSR2 + Y-coordinate. 
1891 */ 1892 vsc->revision = 0x4; 1893 vsc->length = 0xe; 1894 } 1895 } else { 1896 /* 1897 * [PSR1] 1898 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1899 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 1900 * higher). 1901 */ 1902 vsc->revision = 0x2; 1903 vsc->length = 0x8; 1904 } 1905 } 1906 1907 static void 1908 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 1909 struct intel_crtc_state *crtc_state, 1910 const struct drm_connector_state *conn_state) 1911 { 1912 int ret; 1913 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1914 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 1915 1916 if (!conn_state->hdr_output_metadata) 1917 return; 1918 1919 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 1920 1921 if (ret) { 1922 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 1923 return; 1924 } 1925 1926 crtc_state->infoframes.enable |= 1927 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 1928 } 1929 1930 static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915, 1931 enum transcoder cpu_transcoder) 1932 { 1933 if (HAS_DOUBLE_BUFFERED_M_N(i915)) 1934 return true; 1935 1936 return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder); 1937 } 1938 1939 static bool can_enable_drrs(struct intel_connector *connector, 1940 const struct intel_crtc_state *pipe_config, 1941 const struct drm_display_mode *downclock_mode) 1942 { 1943 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1944 1945 if (pipe_config->vrr.enable) 1946 return false; 1947 1948 /* 1949 * DRRS and PSR can't be enable together, so giving preference to PSR 1950 * as it allows more power-savings by complete shutting down display, 1951 * so to guarantee this, intel_drrs_compute_config() must be called 1952 * after intel_psr_compute_config(). 1953 */ 1954 if (pipe_config->has_psr) 1955 return false; 1956 1957 /* FIXME missing FDI M2/N2 etc. 
*/ 1958 if (pipe_config->has_pch_encoder) 1959 return false; 1960 1961 if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder)) 1962 return false; 1963 1964 return downclock_mode && 1965 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; 1966 } 1967 1968 static void 1969 intel_dp_drrs_compute_config(struct intel_connector *connector, 1970 struct intel_crtc_state *pipe_config, 1971 int output_bpp) 1972 { 1973 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1974 const struct drm_display_mode *downclock_mode = 1975 intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode); 1976 int pixel_clock; 1977 1978 if (has_seamless_m_n(connector)) 1979 pipe_config->seamless_m_n = true; 1980 1981 if (!can_enable_drrs(connector, pipe_config, downclock_mode)) { 1982 if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder)) 1983 intel_zero_m_n(&pipe_config->dp_m2_n2); 1984 return; 1985 } 1986 1987 if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) 1988 pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay; 1989 1990 pipe_config->has_drrs = true; 1991 1992 pixel_clock = downclock_mode->clock; 1993 if (pipe_config->splitter.enable) 1994 pixel_clock /= pipe_config->splitter.link_count; 1995 1996 intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, 1997 pipe_config->port_clock, &pipe_config->dp_m2_n2, 1998 pipe_config->fec_enable); 1999 2000 /* FIXME: abstract this better */ 2001 if (pipe_config->splitter.enable) 2002 pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count; 2003 } 2004 2005 static bool intel_dp_has_audio(struct intel_encoder *encoder, 2006 const struct drm_connector_state *conn_state) 2007 { 2008 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2009 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2010 const struct intel_digital_connector_state *intel_conn_state = 2011 to_intel_digital_connector_state(conn_state); 2012 2013 if 
(!intel_dp_port_has_audio(i915, encoder->port))
		return false;

	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		return intel_dp->has_audio;
	else
		return intel_conn_state->force_audio == HDMI_AUDIO_ON;
}

/*
 * Pick the output format (RGB vs YCbCr 4:2:0) and compute the link config
 * for it. If the mode is "4:2:0 only" but 4:2:0 output isn't possible, fall
 * back to RGB; if the RGB link computation fails and the mode is "4:2:0
 * also", retry the link computation with YCbCr 4:2:0.
 */
static int
intel_dp_compute_output_format(struct intel_encoder *encoder,
			       struct intel_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state,
			       bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_display_info *info = &connector->base.display_info;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	bool ycbcr_420_only;
	int ret;

	ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);

	crtc_state->output_format = intel_dp_output_format(connector, ycbcr_420_only);

	if (ycbcr_420_only && !intel_dp_is_ycbcr420(intel_dp, crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
		crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
	}

	ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
					   respect_downstream_limits);
	if (ret) {
		/* Retry with 4:2:0 only when the sink/mode permits it */
		if (intel_dp_is_ycbcr420(intel_dp, crtc_state) ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, adjusted_mode))
			return ret;

		crtc_state->output_format = intel_dp_output_format(connector, true);
		ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
						   respect_downstream_limits);
	}

	return ret;
}

/*
 * Compute the audio-related part of the crtc state: SDP splitting is enabled
 * only when audio is on and the link runs at UHBR rates.
 */
static void
intel_dp_audio_compute_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct drm_connector *connector = conn_state->connector;

	pipe_config->sdp_split_enable =
		intel_dp_has_audio(encoder, conn_state) &&
		intel_dp_is_uhbr(pipe_config);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
		    connector->base.id, connector->name,
		    str_yes_no(pipe_config->sdp_split_enable));
}

/*
 * Compute the full DP crtc state: audio, panel/link configuration, panel
 * fitting, color range, eDP MSO splitting, link M/N values and the various
 * SDPs (VRR, PSR, DRRS, VSC, HDR metadata).
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_audio =
		intel_dp_has_audio(encoder, conn_state) &&
		intel_audio_compute_config(encoder, pipe_config, conn_state);

	fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	/* Reject mode flags DP can't drive */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
	if (ret)
		return ret;

	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Shrink the horizontal timings to the per-link segment */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       pipe_config->fec_enable);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	/* PSR must be computed before DRRS (see can_enable_drrs()) */
	intel_vrr_compute_config(pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, output_bpp);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}

/*
 * Record the link rate/lane count to be used for the next link training and
 * forget any previously trained link state.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

/* Reset the max link params to the common source+sink maxima. */
static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
{
	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_backlight_enable(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_pps_backlight_off(intel_dp);
	intel_backlight_disable(old_conn_state);
}

static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/*
 * Enable/disable DSC decompression in the sink via DP_DSC_ENABLE. No-op
 * unless DSC compression is enabled in the crtc state.
 */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    str_enable_disable(enable));
}

/* Write the source OUI (00-AA-01) to the sink's DP_SOURCE_OUI registers. */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = { 0 };

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");

	/* Timestamp for intel_dp_wait_source_oui() below */
	intel_dp->last_oui_write = jiffies;
}

/*
 * Wait out the VBT-specified HDR DPCD refresh timeout after the last source
 * OUI write before touching OUI-gated registers.
 */
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
		    connector->base.base.id, connector->base.name,
		    connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);

	wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
				       connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 when downstream hpd depends on it */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (!crtc_state)
		return;

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
*/
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		intel_dp_get_dpcd(intel_dp);

	intel_dp_reset_max_link_params(intel_dp);
}

/*
 * Check whether the BIOS-programmed state can be taken over with a fastset;
 * returns false (and flags the state for recompute/full modeset) when the
 * link rate is unsupported, DSC is enabled, or PSR needs to be computed.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	bool fastset = true;

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.connectors_changed = true;
		fastset = false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	if (CAN_PSR(intel_dp)) {
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n",
			    encoder->base.base.id, encoder->base.name);
		crtc_state->uapi.mode_changed = true;
		fastset = false;
	}

	return fastset;
}

/* Read and cache the PCON's DSC encoder capability DPCD registers. */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
			     intel_dp->pcon_dsc_dpcd,
			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}

/* Return the bandwidth in Gbps for the highest bit set in the FRL BW mask. */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}
	return 0;
}

/* Inverse of intel_dp_pcon_get_frl_mask(): Gbps value to FRL BW mask bit. */
static int intel_dp_pcon_set_frl_mask(int max_frl)
{
	switch (max_frl) {
	case 48:
		return DP_PCON_FRL_BW_MASK_48GBPS;
	case 40:
		return DP_PCON_FRL_BW_MASK_40GBPS;
	case 32:
		return DP_PCON_FRL_BW_MASK_32GBPS;
	case 24:
		return DP_PCON_FRL_BW_MASK_24GBPS;
	case 18:
		return DP_PCON_FRL_BW_MASK_18GBPS;
	case 9:
		return DP_PCON_FRL_BW_MASK_9GBPS;
	}

	return 0;
}

/*
 * Max FRL rate (in Gbps) the HDMI sink advertises via its EDID, clamped by
 * the sink's DSC FRL caps when HDMI DSC 1.2 is supported.
 */
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int max_frl_rate;
	int max_lanes, rate_per_lane;
	int max_dsc_lanes, dsc_rate_per_lane;

	max_lanes = connector->display_info.hdmi.max_lanes;
	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
	max_frl_rate = max_lanes * rate_per_lane;

	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
		if (max_dsc_lanes && dsc_rate_per_lane)
			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
	}

	return max_frl_rate;
}

/*
 * True when the PCON's HDMI link is active, in FRL mode, and trained to at
 * least the requested bandwidth mask.
 */
static bool
intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
			     u8 max_frl_bw_mask, u8 *frl_trained_mask)
{
	if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
	    drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
	    *frl_trained_mask >= max_frl_bw_mask)
		return true;

	return false;
}

/*
 * Run the PCON FRL training sequence (prepare, configure, enable) up to the
 * min of the PCON's and the HDMI sink's max FRL bandwidth, and record the
 * trained rate on success.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	max_frl_bw_mask =
intel_dp_pcon_set_frl_mask(max_frl_bw);
	drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);

	/* Skip (re)training if already trained to the target bandwidth */
	if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
		goto frl_trained;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active =
		 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
		 TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

frl_trained:
	drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/* Branch device with an HDMI sink that advertises a non-zero FRL rate. */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp->has_hdmi_sink &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

/*
 * Put the PCON into source control mode, then enable its HDMI (TMDS) link.
 * The two bits are written in separate steps, accumulating into buf.
 */
static
int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
{
	int ret;
	u8 buf = 0;

	/* Set PCON source control mode */
	buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
	if (ret < 0)
		return ret;

	/* Set HDMI LINK ENABLE */
	buf |= DP_PCON_ENABLE_HDMI_LINK;
	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
	if (ret < 0)
		return ret;

	return 0;
}

/* Attempt PCON FRL training; fall back to TMDS mode if it fails. */
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Always go for FRL training if:
	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
	 * -sink is HDMI2.1
	 */
	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = intel_dp_pcon_set_tmds_mode(intel_dp);
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}

/* DSC slice height for the PCON encoder, derived from the active vdisplay. */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
{
	int vactive = crtc_state->hw.adjusted_mode.vdisplay;

	return intel_hdmi_dsc_get_slice_height(vactive);
}

/*
 * DSC slice count for the PCON encoder, limited by both the PCON's and the
 * HDMI sink's DSC capabilities.
 */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
					     pcon_max_slice_width,
					     hdmi_max_slices, hdmi_throughput);
}

/* Compressed bits-per-pixel for the PCON's DSC encoder. */
static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  int num_slices, int slice_width)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int output_format = crtc_state->output_format;
	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
	int hdmi_max_chunk_bytes =
		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
				      num_slices, output_format, hdmi_all_bpp,
				      hdmi_max_chunk_bytes);
}

/*
 * Compute slice height/width/count and bpp for the PCON's DSC encoder and
 * program them as PPS override parameters. Bails out early unless the sink
 * is HDMI 2.1 and both the PCON encoder and the HDMI sink support DSC 1.2.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Little-endian packing; bpp is a 10-bit value (high bits masked) */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}

/*
 * Program a DPCD 1.3+ branch device's protocol converter controls: HDMI/DVI
 * output mode, YCbCr 4:4:4->4:2:0 conversion, and RGB->YCbCr conversion.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
			    str_enable_disable(intel_dp->has_hdmi_sink));

	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
			    str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));

	tmp = intel_dp->dfp.rgb_to_ycbcr ?
2762 DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0; 2763 2764 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 2765 drm_dbg_kms(&i915->drm, 2766 "Failed to %s protocol converter RGB->YCbCr conversion mode\n", 2767 str_enable_disable(tmp)); 2768 } 2769 2770 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 2771 { 2772 u8 dprx = 0; 2773 2774 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 2775 &dprx) != 1) 2776 return false; 2777 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 2778 } 2779 2780 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 2781 { 2782 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2783 2784 /* 2785 * Clear the cached register set to avoid using stale values 2786 * for the sinks that do not support DSC. 2787 */ 2788 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 2789 2790 /* Clear fec_capable to avoid using stale values */ 2791 intel_dp->fec_capable = 0; 2792 2793 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 2794 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 2795 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 2796 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 2797 intel_dp->dsc_dpcd, 2798 sizeof(intel_dp->dsc_dpcd)) < 0) 2799 drm_err(&i915->drm, 2800 "Failed to read DPCD register 0x%x\n", 2801 DP_DSC_SUPPORT); 2802 2803 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 2804 (int)sizeof(intel_dp->dsc_dpcd), 2805 intel_dp->dsc_dpcd); 2806 2807 /* FEC is supported only on DP 1.4 */ 2808 if (!intel_dp_is_edp(intel_dp) && 2809 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 2810 &intel_dp->fec_capable) < 0) 2811 drm_err(&i915->drm, 2812 "Failed to read FEC DPCD register\n"); 2813 2814 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 2815 intel_dp->fec_capable); 2816 } 2817 } 2818 2819 static void intel_edp_mso_mode_fixup(struct intel_connector *connector, 2820 struct drm_display_mode *mode) 2821 { 2822 struct intel_dp *intel_dp = intel_attached_dp(connector); 2823 
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int n = intel_dp->mso_link_count;
	int overlap = intel_dp->mso_pixel_overlap;

	if (!mode || !n)
		return;

	mode->hdisplay = (mode->hdisplay - overlap) * n;
	mode->hsync_start = (mode->hsync_start - overlap) * n;
	mode->hsync_end = (mode->hsync_end - overlap) * n;
	mode->htotal = (mode->htotal - overlap) * n;
	mode->clock *= n;

	drm_mode_set_name(mode);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
		    connector->base.base.id, connector->base.name,
		    DRM_MODE_ARG(mode));
}

void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;

	if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_bpp, connector->panel.vbt.edp.bpp);
		connector->panel.vbt.edp.bpp = pipe_bpp;
	}
}

/*
 * Read the eDP sink's MSO link capability and cache the link count and
 * pixel overlap in intel_dp (zeroed when invalid or unsupported).
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}

static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			/* A zero entry terminates the rate table */
			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (HAS_DSC(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}

/* Whether SINK_COUNT is usable for this device, per the DP helper's quirk logic. */
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_max_sink_lane_count(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information.
So, an early return here saves 3047 * time from performing other operations which are not required. 3048 */ 3049 if (!intel_dp->sink_count) 3050 return false; 3051 } 3052 3053 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 3054 intel_dp->downstream_ports) == 0; 3055 } 3056 3057 static bool 3058 intel_dp_can_mst(struct intel_dp *intel_dp) 3059 { 3060 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3061 3062 return i915->params.enable_dp_mst && 3063 intel_dp_mst_source_support(intel_dp) && 3064 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 3065 } 3066 3067 static void 3068 intel_dp_configure_mst(struct intel_dp *intel_dp) 3069 { 3070 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3071 struct intel_encoder *encoder = 3072 &dp_to_dig_port(intel_dp)->base; 3073 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 3074 3075 drm_dbg_kms(&i915->drm, 3076 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 3077 encoder->base.base.id, encoder->base.name, 3078 str_yes_no(intel_dp_mst_source_support(intel_dp)), 3079 str_yes_no(sink_can_mst), 3080 str_yes_no(i915->params.enable_dp_mst)); 3081 3082 if (!intel_dp_mst_source_support(intel_dp)) 3083 return; 3084 3085 intel_dp->is_mst = sink_can_mst && 3086 i915->params.enable_dp_mst; 3087 3088 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 3089 intel_dp->is_mst); 3090 } 3091 3092 static bool 3093 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) 3094 { 3095 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; 3096 } 3097 3098 static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) 3099 { 3100 int retry; 3101 3102 for (retry = 0; retry < 3; retry++) { 3103 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1, 3104 &esi[1], 3) == 3) 3105 return true; 3106 } 3107 3108 return false; 3109 } 3110 3111 bool 3112 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 3113 const struct 
drm_connector_state *conn_state) 3114 { 3115 /* 3116 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 3117 * of Color Encoding Format and Content Color Gamut], in order to 3118 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 3119 */ 3120 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3121 return true; 3122 3123 switch (conn_state->colorspace) { 3124 case DRM_MODE_COLORIMETRY_SYCC_601: 3125 case DRM_MODE_COLORIMETRY_OPYCC_601: 3126 case DRM_MODE_COLORIMETRY_BT2020_YCC: 3127 case DRM_MODE_COLORIMETRY_BT2020_RGB: 3128 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 3129 return true; 3130 default: 3131 break; 3132 } 3133 3134 return false; 3135 } 3136 3137 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 3138 struct dp_sdp *sdp, size_t size) 3139 { 3140 size_t length = sizeof(struct dp_sdp); 3141 3142 if (size < length) 3143 return -ENOSPC; 3144 3145 memset(sdp, 0, size); 3146 3147 /* 3148 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 3149 * VSC SDP Header Bytes 3150 */ 3151 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 3152 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 3153 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 3154 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 3155 3156 /* 3157 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 3158 * per DP 1.4a spec. 
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

/*
 * Pack an HDMI DRM (HDR static metadata) infoframe into a DP SDP.
 * Returns the number of bytes to hand to write_infoframe(), or -ENOSPC.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
					 const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

/* Pack and write one SDP type (VSC or HDR gamut metadata) if enabled in the state. */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
							       &crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (!crtc_state->has_psr)
		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	if (!enable)
		return;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!crtc_state->has_psr)
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/* Parse an SDP buffer back into a drm_dp_vsc_sdp; returns 0 or -EINVAL. */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

/* Validate and parse an HDR static metadata infoframe SDP; returns 0 or -EINVAL. */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count - 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

/* Read back the VSC SDP from the hardware DIP registers into the crtc state. */
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

/* Read back the HDR static metadata infoframe SDP from the hardware. */
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder,
				 crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

/* Handle a DP compliance LINK_TRAINING autotest request; returns DP_TEST_ACK/NAK. */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

/* Handle a DP compliance video pattern autotest request (color ramp, RGB only). */
static u8
intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

static u8
intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		/* FIXME: Get rid of drm_edid_raw() */
		const struct edid *block = drm_edid_raw(intel_connector->detect_edid);

		/* We have to write the checksum of the last block read */
		block += block->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}

/* Program the DDI compliance registers for the requested PHY test pattern. */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A.
As
	 * current firmware of DPR-100 could not set it, so hardcoding
	 * now for compliance test.
	 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}

/*
 * Apply the currently requested DP PHY compliance test: read the DPRX
 * link status over AUX, recompute the per-lane drive settings, program
 * the source signal levels and source-side test pattern, then mirror
 * the drive settings and pattern selection to the sink via DPCD.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		drm_dbg_kms(&i915->drm, "failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	/* Push the recomputed drive settings to the sink's per-lane regs */
	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
			  intel_dp->train_set, crtc_state->lane_count);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}

/*
 * Handle an automated PHY test pattern request: cache the requested
 * pattern parameters from DPCD and flag the compliance test as active.
 *
 * Returns DP_TEST_ACK on success, DP_TEST_NAK on AUX read failure; the
 * result is written back to DP_TEST_RESPONSE by the caller.
 */
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

/*
 * Service a sink-initiated automated test request (DP compliance):
 * read DP_TEST_REQUEST, dispatch to the matching autotest handler, and
 * write the ACK/NAK back to DP_TEST_RESPONSE.
 */
static
void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	/* Remember the accepted test type so later handling can act on it */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}

/*
 * Check channel equalization in @link_status for the active link, using
 * the 128b/132b criteria for UHBR rates (link_rate >= 1000000, i.e.
 * 10 Gbps per lane) and the 8b/10b criteria otherwise.
 *
 * Returns true if the link is ok; otherwise dumps the link status,
 * logs that retraining is needed and returns false.
 */
static bool intel_dp_link_ok(struct intel_dp *intel_dp,
			     u8 link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	bool uhbr = intel_dp->link_rate >= 1000000;
	bool ok;

	if (uhbr)
		ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
							  intel_dp->lane_count);
	else
		ok = drm_dp_channel_eq_ok(link_status,
					  intel_dp->lane_count);

	if (ok)
		return true;

	intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] %s link not ok, retraining\n",
		    encoder->base.base.id, encoder->base.name,
		    uhbr ? "128b/132b" : "8b/10b");

	return false;
}

/*
 * Let the MST topology manager service DOWN_REP/UP_REQ message
 * interrupts in @esi and handle any CP_IRQ (HDCP) notification,
 * collecting the serviced bits into @ack for a later write-back.
 */
static void
intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
	bool handled = false;

	drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
	if (handled)
		ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);

	if (esi[1] & DP_CP_IRQ) {
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
		ack[1] |= DP_CP_IRQ;
	}
}

/*
 * Read the ESI copy of the lane status registers (two bytes shorter
 * than the full link status block) and check whether the MST link is
 * still in sync. Returns false on AUX failure or a bad link.
 */
static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	u8 link_status[DP_LINK_STATUS_SIZE] = {};
	const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
			     esi_link_status_size) != esi_link_status_size) {
		drm_err(&i915->drm,
			"[ENCODER:%d:%s] Failed to read link status\n",
			encoder->base.base.id, encoder->base.name);
		return false;
	}

	return intel_dp_link_ok(intel_dp, link_status);
}

/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Loop until there are no more serviced IRQ bits left to ack */
	for (;;) {
		u8 esi[4] = {};
		u8 ack[4] = {};

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);

		/* Only check the main link once; skip if already bad */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    esi[3] & LINK_STATUS_CHANGED) {
			if (!intel_dp_mst_link_status(intel_dp))
				link_ok = false;
			ack[3] |= LINK_STATUS_CHANGED;
		}

		intel_dp_mst_hpd_irq(intel_dp, esi, ack);

		/* Nothing was serviced this iteration - we're done */
		if (!memchr_inv(ack, 0, sizeof(ack)))
			break;

		if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
			drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
	}

	return link_ok;
}

/*
 * PCON DP->HDMI: if FRL had been trained but the HDMI link has since
 * gone down, disable the HDMI link in the PCON, dump the FRL error
 * counters, and kick off FRL retraining (or TMDS fallback).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		intel_dp->frl.is_trained = false;

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

static bool intel_dp_needs_link_retrain(struct intel_dp
*intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Never trained yet - nothing to re-do */
	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status);
}

/*
 * Does @conn_state route through this intel_dp, either via the SST
 * encoder or via one of its per-pipe MST encoders?
 */
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Work out which pipes need retraining for this port and lock their
 * CRTCs via @ctx. *@pipe_mask is left 0 when no retraining is needed
 * (checked both before and after taking the locks, since the link
 * state may change in between). Returns 0 or a locking error
 * (e.g. -EDEADLK) to be handled by the caller's backoff loop.
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Recheck now that the relevant CRTC locks are held */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*pipe_mask = 0;

	return ret;
}

static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	/* MST ports report disconnected on the SST connector */
	return connector->base.status == connector_status_connected ||
		intel_dp->is_mst;
}

/*
 * Retrain the link on all pipes driven by this encoder, suppressing
 * FIFO underrun reporting across the retrain and re-enabling it only
 * after the next vblank on each pipe.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_crtc_wait_for_next_vblank(crtc);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct
drm_modeset_acquire_ctx *ctx,
				  u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	/* Collect and lock the active CRTCs driven by this port */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Lock the relevant CRTCs and run the pending PHY compliance test on
 * the transcoder driving this port (the MST master transcoder on
 * DISPLAY_VER >= 12).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}

/*
 * Entry point for the deferred PHY compliance test: retry with modeset
 * backoff on -EDEADLK until the required locks are acquired, then drop
 * all locks.
 */
void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}

/*
 * Read DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD 1.1+ only), write the value
 * back to ack it, and dispatch automated-test, HDCP CP_IRQ and
 * sink-specific interrupts.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the pending vector before handling it */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * Read and ack DP_LINK_SERVICE_IRQ_VECTOR_ESI0 (DPCD 1.1+ only);
 * currently only HDMI (PCON FRL) link status changes are acted on.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0,
val) != 1)
		return;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	/* test_type was just set by the device service IRQ handler above */
	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP is handled by edp_detect(), never here */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: only the coarse downstream port type exists */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are always connected */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* The per-port ->connected() hook needs display core power */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return the panel's fixed EDID if one is cached (duplicated, NULL if
 * the cached EDID is an error), otherwise read the EDID over DDC.
 * Caller owns the returned EDID.
 */
static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *fixed_edid = connector->panel.fixed_edid;

	/* Use panel fixed edid if we have one */
	if (fixed_edid) {
		/* invalid edid */
		if (IS_ERR(fixed_edid))
			return NULL;

		return drm_edid_dup(fixed_edid);
	}

	return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
}

/*
 * Cache the downstream facing port (DFP) limits - max bpc, dotclock,
 * TMDS clock range and PCON FRL bandwidth - derived from the branch
 * device DPCD and the sink's EDID, then refresh the PCON DSC caps.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct drm_edid *drm_edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct edid *edid;

	/* FIXME: Get rid of drm_edid_raw() */
	edid = drm_edid_raw(drm_edid);

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Decide how YCbCr 4:2:0 output can be achieved on this connector:
 * via PCON RGB->YCbCr + 4:4:4->4:2:0 conversion, 4:2:0 passthrough, or
 * 4:4:4->4:2:0 conversion only, and update the dfp state and the
 * connector's ycbcr_420_allowed flag accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_IRONLAKE(i915))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV);

	if (DISPLAY_VER(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
			/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
		}
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
		    str_yes_no(connector->base.ycbcr_420_allowed),
		    str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * Read and cache the sink's EDID and refresh everything derived from
 * it: connector display info, the VRR capable property, DFP limits,
 * YCbCr 4:2:0 support, HDMI sink/audio flags and the CEC EDID.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *drm_edid;
	const struct edid *edid;
	bool vrr_capable;

	intel_dp_unset_edid(intel_dp);
	drm_edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = drm_edid;

	/* Below we depend on display info having been updated */
	drm_edid_connector_update(&connector->base, drm_edid);

	vrr_capable = intel_vrr_is_capable(connector);
	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
		    connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
	drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);

	intel_dp_update_dfp(intel_dp, drm_edid);
	intel_dp_update_420(intel_dp);

	/* FIXME: Get rid of drm_edid_raw() */
	edid = drm_edid_raw(drm_edid);
	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}

static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct
intel_connector *connector = intel_dp->attached_connector;

	/* Drop the cached EDID and reset all EDID-derived state */
	drm_dp_cec_unset_edid(&intel_dp->aux);
	drm_edid_free(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;

	drm_connector_set_vrr_capable_property(&connector->base,
					       false);
}

/*
 * ->detect_ctx() hook: probe the port, refresh DPCD/DSC/MST state and
 * the EDID, and force a link status check for non-eDP sinks since some
 * monitors don't signal link loss with an IRQ_HPD.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Reset compliance and DSC state for the next connection */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (HAS_DSC(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		intel_dp_reset_max_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/*
 * ->force() hook: re-read the EDID for a connector already known to be
 * connected, holding an AUX power domain reference across the read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}

/*
 * ->get_modes() hook: add modes from the cached EDID (plus the eDP
 * fixed mode); if the branch device has no EDID-capable sink, fall
 * back to a mode derived from the downstream port caps.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int num_modes;

	/* drm_edid_connector_update() done in ->detect() or ->force() */
	num_modes = drm_edid_connector_add_modes(connector);

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
		num_modes += intel_panel_get_modes(intel_connector);

	if (num_modes)
		return num_modes;

	if (!intel_connector->detect_edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector,
mode); 4842 num_modes++; 4843 } 4844 } 4845 4846 return num_modes; 4847 } 4848 4849 static int 4850 intel_dp_connector_register(struct drm_connector *connector) 4851 { 4852 struct drm_i915_private *i915 = to_i915(connector->dev); 4853 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4854 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4855 struct intel_lspcon *lspcon = &dig_port->lspcon; 4856 int ret; 4857 4858 ret = intel_connector_register(connector); 4859 if (ret) 4860 return ret; 4861 4862 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 4863 intel_dp->aux.name, connector->kdev->kobj.name); 4864 4865 intel_dp->aux.dev = connector->kdev; 4866 ret = drm_dp_aux_register(&intel_dp->aux); 4867 if (!ret) 4868 drm_dp_cec_register_connector(&intel_dp->aux, connector); 4869 4870 if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) 4871 return ret; 4872 4873 /* 4874 * ToDo: Clean this up to handle lspcon init and resume more 4875 * efficiently and streamlined. 
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_connector_attach_hdr_output_metadata_property(connector);
	}

	return ret;
}

/*
 * ->early_unregister() connector hook: unwind what
 * intel_dp_connector_register() set up — CEC first, then the AUX channel,
 * then the base connector.
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

/*
 * Encoder teardown: clean up the MST topology, force VDD off, wait out
 * the panel power cycle delay and release AUX resources.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

/* Suspend hook: make sure panel VDD is off before the system suspends. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);
}

/* Shutdown hook: honour the panel power cycle delay before power off. */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Pull every connector belonging to @tile_group_id into the atomic
 * state, flag its CRTC for a full modeset and add the affected planes,
 * so that all tiles of a tiled display are modeset together.
 *
 * Returns 0 on success or a negative error code from the atomic helpers.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector not currently bound to a CRTC: nothing to modeset */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * For every CRTC whose CPU transcoder is in the @transcoders bitmask,
 * add its state, mark it for a modeset and pull in its connectors and
 * planes.  Warns if any requested transcoder was not found on an
 * enabled CRTC.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct
			intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Track which requested transcoders have been handled */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * When @connector was part of a port sync group (master transcoder plus
 * slave transcoders), pull all transcoders of that group into the atomic
 * state so they are modeset together.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	/* Collect the slave mask plus the master transcoder, if any */
	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/*
 * ->atomic_check() connector hook: run the common digital connector
 * check, validate the MST root connector state when MST is supported,
 * and on DISPLAY_VER >= 9 additionally pull in tile-group siblings and
 * port-sync'd transcoders whenever this connector needs a modeset.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
	struct intel_connector *intel_conn = to_intel_connector(conn);
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	if (intel_dp_mst_source_support(intel_dp)) {
		ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
		if (ret)
			return ret;
	}

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

/*
 * Out-of-band hotplug (e.g. reported via the Type-C subsystem): mark the
 * encoder's HPD pin as having an event pending and kick the hotplug work.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);

	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
	spin_unlock_irq(&i915->irq_lock);
	queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
}

/* DP connector vfuncs */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};

/* DP connector helper vfuncs */
static const struct
	drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/*
 * HPD pulse handler for a DP digital port.
 *
 * Long pulses request a full reprobe with fresh link parameters and are
 * left for the caller (IRQ_NONE), short pulses are serviced here via the
 * MST or SST short-pulse paths.  Pulses on eDP that would require
 * powering VDD back on are ignored to avoid an endless
 * vdd-off/hpd/vdd-on cycle.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		/* Force a fresh link parameter selection on the next detect */
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/*
 * Decide whether @port is an eDP port, based on platform constraints and
 * the VBT child device info in @devdata.
 */
static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
				  const struct intel_bios_encoder_data *devdata,
				  enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (DISPLAY_VER(dev_priv) < 5)
		return false;

	/* Before gen 9, port A was always eDP */
	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
		return true;

	return devdata && intel_bios_encoder_supports_edp(devdata);
}

/* Public wrapper: look up the port's VBT data and check for eDP. */
bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
{
	const struct intel_bios_encoder_data *devdata =
		intel_bios_encoder_data_lookup(i915, port);

	return _intel_dp_is_port_edp(i915, devdata, port);
}

/*
 * Whether this encoder can send the gamut metadata (HDR) DIP: never via
 * LSPCON, always on DISPLAY_VER >= 11, and on HSW/BDW/gen9 for ports
 * other than A.
 */
static bool
has_gamut_metadata_dip(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	enum port port = encoder->port;

	if (intel_bios_encoder_is_lspcon(encoder->devdata))
		return false;

	if (DISPLAY_VER(i915) >= 11)
		return true;

	if (port == PORT_A)
		return false;

	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    DISPLAY_VER(i915) >= 9)
		return true;

	return false;
}

/*
 * Attach the connector properties applicable to (e)DP: subconnector,
 * force-audio, broadcast RGB, max bpc, colorspace, HDR metadata and VRR,
 * gated on platform capabilities.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (DISPLAY_VER(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
		drm_connector_attach_hdr_output_metadata_property(connector);

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}

/*
 * Attach the eDP-only properties: scaling mode, plus the panel
 * orientation derived from the VBT and the preferred fixed mode's
 * resolution.
 */
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *fixed_mode =
		intel_panel_preferred_fixed_mode(connector);

	intel_attach_scaling_mode_property(&connector->base);

	drm_connector_set_panel_orientation_with_quirk(&connector->base,
						       i915->display.vbt.orientation,
						       fixed_mode->hdisplay,
						       fixed_mode->vdisplay);
}

/*
 * Initialize backlight control for an eDP panel.  On VLV/CHV the
 * backlight is per-pipe, so pick the pipe currently driving the panel
 * (with PPS-pipe and pipe A fallbacks); elsewhere INVALID_PIPE is fine.
 */
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
				      struct intel_connector *connector)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum pipe pipe = INVALID_PIPE;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;
	}

	intel_backlight_setup(connector, pipe);
}

/*
 * Finish connector initialization for eDP: validate the PPS, read DPCD
 * and EDID, determine the panel's fixed mode(s) (EDID, with VBT
 * fallback), and set up MSO, backlight and eDP properties.
 *
 * Returns true on success (or trivially for non-eDP), false if eDP must
 * be disabled on this port.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool has_dpcd;
	const struct drm_edid *drm_edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(&dev_priv->drm,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
				    encoder->devdata);

	if (!intel_pps_init(intel_dp)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		/*
		 * The BIOS may have still enabled VDD on the PPS even
		 * though it's unusable. Make sure we turn it back off
		 * and to release the power domain references/etc.
		 */
		goto out_vdd_off;
	}

	/* Cache DPCD and EDID for edp.
	 */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	/*
	 * Read the EDID over DDC, falling back to the ACPI OpRegion copy;
	 * mode_config.mutex protects the connector mode list updates below.
	 */
	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_edid = drm_edid_read_ddc(connector, &intel_dp->aux.ddc);
	if (!drm_edid) {
		/* Fallback to EDID from ACPI OpRegion, if any */
		drm_edid = intel_opregion_get_edid(intel_connector);
		if (drm_edid)
			drm_dbg_kms(&dev_priv->drm,
				    "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
				    connector->base.id, connector->name);
	}
	if (drm_edid) {
		/* Reject an EDID that updates to zero usable modes */
		if (drm_edid_connector_update(connector, drm_edid) ||
		    !drm_edid_connector_add_modes(connector)) {
			drm_edid_connector_update(connector, NULL);
			drm_edid_free(drm_edid);
			drm_edid = ERR_PTR(-EINVAL);
		}
	} else {
		drm_edid = ERR_PTR(-ENOENT);
	}

	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
				   IS_ERR(drm_edid) ? NULL : drm_edid);

	intel_panel_add_edid_fixed_modes(intel_connector, true);

	/* MSO requires information from the EDID */
	intel_edp_mso_init(intel_dp);

	/* multiply the mode clock and horizontal timings for MSO */
	list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
		intel_edp_mso_mode_fixup(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!intel_panel_preferred_fixed_mode(intel_connector))
		intel_panel_add_vbt_lfp_fixed_mode(intel_connector);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	if (!intel_panel_preferred_fixed_mode(intel_connector)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	intel_panel_init(intel_connector, drm_edid);

	intel_edp_backlight_setup(intel_dp, intel_connector);

	intel_edp_add_properties(intel_dp);

	intel_pps_init_late(intel_dp);

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}

/*
 * Deferred work run after a link training failure: flag the connector's
 * link status as BAD and send a hotplug uevent so userspace performs a
 * new modeset (with fallback link parameters).
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
		    connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_connector_hotplug_event(connector);
}

/*
 * Create and initialize the DRM connector for a DP/eDP digital port:
 * determine eDP vs DP, initialize AUX, set up eDP panel state when
 * applicable, establish link rates, init MST, attach properties and
 * HDCP/PSR support.
 *
 * Returns true on success, false if the connector could not be set up
 * (e.g. unusable eDP); on failure the connector is cleaned up.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_default_max_sink_lane_count(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
		connector->interlace_allowed = true;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		goto fail;
	}

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_max_link_params(intel_dp);

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

/*
 * System suspend: suspend the MST topology manager on every DDI encoder
 * that currently has an active MST topology.
 */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/*
 * System resume: resume the MST topology manager on every MST-capable
 * DDI encoder; if the topology cannot be resumed, drop back out of MST
 * mode.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}