1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/slab.h> 32 #include <linux/types.h> 33 34 #include <asm/byteorder.h> 35 36 #include <drm/drm_atomic_helper.h> 37 #include <drm/drm_crtc.h> 38 #include <drm/drm_dp_helper.h> 39 #include <drm/drm_edid.h> 40 #include <drm/drm_probe_helper.h> 41 42 #include "g4x_dp.h" 43 #include "i915_debugfs.h" 44 #include "i915_drv.h" 45 #include "intel_atomic.h" 46 #include "intel_audio.h" 47 #include "intel_backlight.h" 48 #include "intel_connector.h" 49 #include "intel_ddi.h" 50 #include "intel_de.h" 51 #include "intel_display_types.h" 52 #include "intel_dp.h" 53 #include "intel_dp_aux.h" 54 #include "intel_dp_hdcp.h" 55 #include "intel_dp_link_training.h" 56 #include "intel_dp_mst.h" 57 #include "intel_dpio_phy.h" 58 #include "intel_dpll.h" 59 #include "intel_drrs.h" 60 #include "intel_fifo_underrun.h" 61 #include "intel_hdcp.h" 62 #include "intel_hdmi.h" 63 #include "intel_hotplug.h" 64 #include "intel_lspcon.h" 65 #include "intel_lvds.h" 66 #include "intel_panel.h" 67 #include "intel_pps.h" 68 #include "intel_psr.h" 69 #include "intel_tc.h" 70 #include "intel_vdsc.h" 71 #include "intel_vrr.h" 72 73 #define DP_DPRX_ESI_LEN 14 74 75 /* DP DSC throughput values used for slice count calculations KPixels/s */ 76 #define DP_DSC_PEAK_PIXEL_RATE 2720000 77 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 78 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 79 80 /* DP DSC FEC Overhead factor = 1/(0.972261) */ 81 #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 82 83 /* Compliance test status bits */ 84 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 85 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 86 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 87 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 88 89 90 /* Constants for DP DSC configurations */ 91 
/* Compressed bpp values supported by the HW, per the VESA DSC spec */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 *
 * This function is not safe to use prior to encoder type being set.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);

/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
	/* UHBR10 (10 Gbps) is the lowest UHBR rate; port_clock is in 10 kbps units */
	return crtc_state->port_clock >= 1000000;
}

/*
 * Update sink rates from DPCD.
 *
 * Fills intel_dp->sink_rates[] (in increasing order, kHz link clock units)
 * and intel_dp->num_sink_rates based on the sink's advertised link rate
 * caps, clamped by any LTTPR (link training tunable PHY repeater) caps.
 */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/*
	 * Sink rates for 8b/10b.
	 */
	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	/* Keep only the standard 8b/10b rates the sink (and LTTPRs) can do */
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	/*
	 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
	 * rates and 10 Gbps.
	 */
	if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
		u8 uhbr_rates = 0;

		/* sink_rates[] must have room for the three UHBR entries below */
		BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

		drm_dp_dpcd_readb(&intel_dp->aux,
				  DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

		if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
			/* We have a repeater */
			if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
			    intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
			    DP_PHY_REPEATER_128B132B_SUPPORTED) {
				/* Repeater supports 128b/132b, valid UHBR rates */
				uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
									  DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
			} else {
				/* Does not support 128b/132b */
				uhbr_rates = 0;
			}
		}

		if (uhbr_rates & DP_UHBR10)
			intel_dp->sink_rates[i++] = 1000000;
		if (uhbr_rates & DP_UHBR13_5)
			intel_dp->sink_rates[i++] = 1350000;
		if (uhbr_rates & DP_UHBR20)
			intel_dp->sink_rates[i++] = 2000000;
	}

	intel_dp->num_sink_rates = i;
}
*/ 195 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 196 { 197 int i; 198 199 /* Limit results by potentially reduced max rate */ 200 for (i = 0; i < len; i++) { 201 if (rates[len - i - 1] <= max_rate) 202 return len - i; 203 } 204 205 return 0; 206 } 207 208 /* Get length of common rates array potentially limited by max_rate. */ 209 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 210 int max_rate) 211 { 212 return intel_dp_rate_limit_len(intel_dp->common_rates, 213 intel_dp->num_common_rates, max_rate); 214 } 215 216 /* Theoretical max between source and sink */ 217 static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 218 { 219 return intel_dp->common_rates[intel_dp->num_common_rates - 1]; 220 } 221 222 /* Theoretical max between source and sink */ 223 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 224 { 225 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 226 int source_max = dig_port->max_lanes; 227 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 228 int fia_max = intel_tc_port_fia_max_lane_count(dig_port); 229 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); 230 231 if (lttpr_max) 232 sink_max = min(sink_max, lttpr_max); 233 234 return min3(source_max, sink_max, fia_max); 235 } 236 237 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 238 { 239 return intel_dp->max_link_lane_count; 240 } 241 242 /* 243 * The required data bandwidth for a mode with given pixel clock and bpp. This 244 * is the required net bandwidth independent of the data bandwidth efficiency. 245 */ 246 int 247 intel_dp_link_required(int pixel_clock, int bpp) 248 { 249 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 250 return DIV_ROUND_UP(pixel_clock * bpp, 8); 251 } 252 253 /* 254 * Given a link rate and lanes, get the data bandwidth. 
255 * 256 * Data bandwidth is the actual payload rate, which depends on the data 257 * bandwidth efficiency and the link rate. 258 * 259 * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency 260 * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = 261 * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by 262 * coincidence, the port clock in kHz matches the data bandwidth in kBps, and 263 * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no 264 * longer holds for data bandwidth as soon as FEC or MST is taken into account!) 265 * 266 * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For 267 * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 268 * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 269 * does not match the symbol clock, the port clock (not even if you think in 270 * terms of a byte clock), nor the data bandwidth. It only matches the link bit 271 * rate in units of 10000 bps. 272 */ 273 int 274 intel_dp_max_data_rate(int max_link_rate, int max_lanes) 275 { 276 if (max_link_rate >= 1000000) { 277 /* 278 * UHBR rates always use 128b/132b channel encoding, and have 279 * 97.71% data bandwidth efficiency. Consider max_link_rate the 280 * link bit rate in units of 10000 bps. 281 */ 282 int max_link_rate_kbps = max_link_rate * 10; 283 284 max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000); 285 max_link_rate = max_link_rate_kbps / 8; 286 } 287 288 /* 289 * Lower than UHBR rates always use 8b/10b channel encoding, and have 290 * 80% data bandwidth efficiency for SST non-FEC. 
However, this turns 291 * out to be a nop by coincidence, and can be skipped: 292 * 293 * int max_link_rate_kbps = max_link_rate * 10; 294 * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10); 295 * max_link_rate = max_link_rate_kbps / 8; 296 */ 297 298 return max_link_rate * max_lanes; 299 } 300 301 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) 302 { 303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 304 struct intel_encoder *encoder = &intel_dig_port->base; 305 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 306 307 return DISPLAY_VER(dev_priv) >= 12 || 308 (DISPLAY_VER(dev_priv) == 11 && 309 encoder->port != PORT_A); 310 } 311 312 static int dg2_max_source_rate(struct intel_dp *intel_dp) 313 { 314 return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; 315 } 316 317 static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) 318 { 319 u32 voltage; 320 321 voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; 322 323 return voltage == VOLTAGE_INFO_0_85V; 324 } 325 326 static int icl_max_source_rate(struct intel_dp *intel_dp) 327 { 328 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 329 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 330 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 331 332 if (intel_phy_is_combo(dev_priv, phy) && 333 (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) 334 return 540000; 335 336 return 810000; 337 } 338 339 static int ehl_max_source_rate(struct intel_dp *intel_dp) 340 { 341 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 342 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 343 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 344 345 if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) 346 return 540000; 347 348 return 810000; 349 } 350 351 static int dg1_max_source_rate(struct intel_dp 
*intel_dp) 352 { 353 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 354 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 355 enum phy phy = intel_port_to_phy(i915, dig_port->base.port); 356 357 if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) 358 return 540000; 359 360 return 810000; 361 } 362 363 static void 364 intel_dp_set_source_rates(struct intel_dp *intel_dp) 365 { 366 /* The values must be in increasing order */ 367 static const int icl_rates[] = { 368 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, 369 1000000, 1350000, 370 }; 371 static const int bxt_rates[] = { 372 162000, 216000, 243000, 270000, 324000, 432000, 540000 373 }; 374 static const int skl_rates[] = { 375 162000, 216000, 270000, 324000, 432000, 540000 376 }; 377 static const int hsw_rates[] = { 378 162000, 270000, 540000 379 }; 380 static const int g4x_rates[] = { 381 162000, 270000 382 }; 383 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 384 struct intel_encoder *encoder = &dig_port->base; 385 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 386 const int *source_rates; 387 int size, max_rate = 0, vbt_max_rate; 388 389 /* This should only be done once */ 390 drm_WARN_ON(&dev_priv->drm, 391 intel_dp->source_rates || intel_dp->num_source_rates); 392 393 if (DISPLAY_VER(dev_priv) >= 11) { 394 source_rates = icl_rates; 395 size = ARRAY_SIZE(icl_rates); 396 if (IS_DG2(dev_priv)) 397 max_rate = dg2_max_source_rate(intel_dp); 398 else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || 399 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 400 max_rate = dg1_max_source_rate(intel_dp); 401 else if (IS_JSL_EHL(dev_priv)) 402 max_rate = ehl_max_source_rate(intel_dp); 403 else 404 max_rate = icl_max_source_rate(intel_dp); 405 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 406 source_rates = bxt_rates; 407 size = ARRAY_SIZE(bxt_rates); 408 } else if (DISPLAY_VER(dev_priv) == 9) { 
409 source_rates = skl_rates; 410 size = ARRAY_SIZE(skl_rates); 411 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 412 IS_BROADWELL(dev_priv)) { 413 source_rates = hsw_rates; 414 size = ARRAY_SIZE(hsw_rates); 415 } else { 416 source_rates = g4x_rates; 417 size = ARRAY_SIZE(g4x_rates); 418 } 419 420 vbt_max_rate = intel_bios_dp_max_link_rate(encoder); 421 if (max_rate && vbt_max_rate) 422 max_rate = min(max_rate, vbt_max_rate); 423 else if (vbt_max_rate) 424 max_rate = vbt_max_rate; 425 426 if (max_rate) 427 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 428 429 intel_dp->source_rates = source_rates; 430 intel_dp->num_source_rates = size; 431 } 432 433 static int intersect_rates(const int *source_rates, int source_len, 434 const int *sink_rates, int sink_len, 435 int *common_rates) 436 { 437 int i = 0, j = 0, k = 0; 438 439 while (i < source_len && j < sink_len) { 440 if (source_rates[i] == sink_rates[j]) { 441 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 442 return k; 443 common_rates[k] = source_rates[i]; 444 ++k; 445 ++i; 446 ++j; 447 } else if (source_rates[i] < sink_rates[j]) { 448 ++i; 449 } else { 450 ++j; 451 } 452 } 453 return k; 454 } 455 456 /* return index of rate in rates array, or -1 if not found */ 457 static int intel_dp_rate_index(const int *rates, int len, int rate) 458 { 459 int i; 460 461 for (i = 0; i < len; i++) 462 if (rate == rates[i]) 463 return i; 464 465 return -1; 466 } 467 468 static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 469 { 470 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 471 472 drm_WARN_ON(&i915->drm, 473 !intel_dp->num_source_rates || !intel_dp->num_sink_rates); 474 475 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 476 intel_dp->num_source_rates, 477 intel_dp->sink_rates, 478 intel_dp->num_sink_rates, 479 intel_dp->common_rates); 480 481 /* Paranoia, there should always be something in common. 
*/ 482 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { 483 intel_dp->common_rates[0] = 162000; 484 intel_dp->num_common_rates = 1; 485 } 486 } 487 488 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 489 u8 lane_count) 490 { 491 /* 492 * FIXME: we need to synchronize the current link parameters with 493 * hardware readout. Currently fast link training doesn't work on 494 * boot-up. 495 */ 496 if (link_rate == 0 || 497 link_rate > intel_dp->max_link_rate) 498 return false; 499 500 if (lane_count == 0 || 501 lane_count > intel_dp_max_lane_count(intel_dp)) 502 return false; 503 504 return true; 505 } 506 507 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, 508 int link_rate, 509 u8 lane_count) 510 { 511 const struct drm_display_mode *fixed_mode = 512 intel_dp->attached_connector->panel.fixed_mode; 513 int mode_rate, max_rate; 514 515 mode_rate = intel_dp_link_required(fixed_mode->clock, 18); 516 max_rate = intel_dp_max_data_rate(link_rate, lane_count); 517 if (mode_rate > max_rate) 518 return false; 519 520 return true; 521 } 522 523 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, 524 int link_rate, u8 lane_count) 525 { 526 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 527 int index; 528 529 /* 530 * TODO: Enable fallback on MST links once MST link compute can handle 531 * the fallback params. 
/* Scale a mode clock by the FEC overhead (2.4%): clock * 1e6 / 972261 */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

/* Per-platform small joiner RAM size, in bits */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 13)
		return 17280 * 8;
	else if (DISPLAY_VER(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

/*
 * Compute the maximum DSC compressed output bpp for the given link and mode,
 * bounded by link bandwidth, small joiner RAM, and (for big joiner) CDCLK.
 *
 * Returns the bpp in U6.4 fixed point format (i.e. bpp << 4), or 0 if even
 * the smallest valid DSC bpp cannot be sustained.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner,
				       u32 pipe_bpp)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;

	/* With big joiner, each pipe handles half the width -> double the budget */
	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
	if (DISPLAY_VER(i915) >= 13) {
		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
	} else {
		/* Find the nearest match in the array of known BPPs from VESA */
		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
			if (bits_per_pixel < valid_dsc_bpp[i + 1])
				break;
		}
		bits_per_pixel = valid_dsc_bpp[i];
	}

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

/*
 * Pick the smallest valid DSC slice count that satisfies both the encoder
 * throughput limit and the sink's max slice width, or 0 if none fits.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Throughput limit per slice differs below/above the peak pixel rate */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Big joiner doubles the effective slice count */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

/*
 * Choose the output format for the given mode: RGB unless the mode is
 * 4:2:0-only, in which case use whatever conversion the DFP offers.
 */
static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	/* DFP can convert RGB all the way to 4:2:0 -> keep the pipe in RGB */
	if (intel_dp->dfp.rgb_to_ycbcr &&
	    intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}
/* Minimum link bpp for the given output format (6 bpc RGB, else 8 bpc) */
int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
	 * format of the number of bytes per pixel will be half the number
	 * of bytes of RGB pixel.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Minimum link bpp for a mode, taking its output format into account */
static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

/*
 * Validate a mode against the downstream facing port (DFP) limits:
 * PCON FRL bandwidth, DFP dotclock, and DP++/HDMI/DVI TMDS clock range.
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/* Does this mode need two pipes joined (clock or width beyond one pipe)? */
static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
				    int hdisplay, int clock)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_can_bigjoiner(intel_dp))
		return false;

	return clock > i915->max_dotclk_freq || hdisplay > 5120;
}

/*
 * .mode_valid connector hook: check a mode against panel fixed mode,
 * dotclock/link bandwidth (with DSC as a fallback), hdisplay quirks,
 * downstream port limits, and max plane size.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		status = intel_panel_mode_valid(intel_connector, mode);
		if (status != MODE_OK)
			return status;

		/* eDP always drives the panel's fixed mode */
		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if (DISPLAY_VER(dev_priv) >= 10 &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);

		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay,
							    bigjoiner,
							    pipe_bpp) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_output_bpp && dsc_slice_count;
	}

	/*
	 * Big joiner configuration needs DSC for TGL which is not true for
	 * XE_LPD where uncompressed joiner is supported.
	 */
	if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
895 */ 896 if (DISPLAY_VER(dev_priv) >= 10 && 897 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { 898 /* 899 * TBD pass the connector BPC, 900 * for now U8_MAX so that max BPC on that platform would be picked 901 */ 902 int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX); 903 904 if (intel_dp_is_edp(intel_dp)) { 905 dsc_max_output_bpp = 906 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; 907 dsc_slice_count = 908 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 909 true); 910 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { 911 dsc_max_output_bpp = 912 intel_dp_dsc_get_output_bpp(dev_priv, 913 max_link_clock, 914 max_lanes, 915 target_clock, 916 mode->hdisplay, 917 bigjoiner, 918 pipe_bpp) >> 4; 919 dsc_slice_count = 920 intel_dp_dsc_get_slice_count(intel_dp, 921 target_clock, 922 mode->hdisplay, 923 bigjoiner); 924 } 925 926 dsc = dsc_max_output_bpp && dsc_slice_count; 927 } 928 929 /* 930 * Big joiner configuration needs DSC for TGL which is not true for 931 * XE_LPD where uncompressed joiner is supported. 932 */ 933 if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) 934 return MODE_CLOCK_HIGH; 935 936 if (mode_rate > max_rate && !dsc) 937 return MODE_CLOCK_HIGH; 938 939 status = intel_dp_mode_valid_downstream(intel_connector, 940 mode, target_clock); 941 if (status != MODE_OK) 942 return status; 943 944 return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); 945 } 946 947 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) 948 { 949 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); 950 } 951 952 bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) 953 { 954 return DISPLAY_VER(i915) >= 10; 955 } 956 957 static void snprintf_int_array(char *str, size_t len, 958 const int *array, int nelem) 959 { 960 int i; 961 962 str[0] = '\0'; 963 964 for (i = 0; i < nelem; i++) { 965 int r = snprintf(str, len, "%s%d", i ? 
", " : "", array[i]); 966 if (r >= len) 967 return; 968 str += r; 969 len -= r; 970 } 971 } 972 973 static void intel_dp_print_rates(struct intel_dp *intel_dp) 974 { 975 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 976 char str[128]; /* FIXME: too big for stack? */ 977 978 if (!drm_debug_enabled(DRM_UT_KMS)) 979 return; 980 981 snprintf_int_array(str, sizeof(str), 982 intel_dp->source_rates, intel_dp->num_source_rates); 983 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 984 985 snprintf_int_array(str, sizeof(str), 986 intel_dp->sink_rates, intel_dp->num_sink_rates); 987 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 988 989 snprintf_int_array(str, sizeof(str), 990 intel_dp->common_rates, intel_dp->num_common_rates); 991 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 992 } 993 994 int 995 intel_dp_max_link_rate(struct intel_dp *intel_dp) 996 { 997 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 998 int len; 999 1000 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1001 if (drm_WARN_ON(&i915->drm, len <= 0)) 1002 return 162000; 1003 1004 return intel_dp->common_rates[len - 1]; 1005 } 1006 1007 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1008 { 1009 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1010 int i = intel_dp_rate_index(intel_dp->sink_rates, 1011 intel_dp->num_sink_rates, rate); 1012 1013 if (drm_WARN_ON(&i915->drm, i < 0)) 1014 i = 0; 1015 1016 return i; 1017 } 1018 1019 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1020 u8 *link_bw, u8 *rate_select) 1021 { 1022 /* eDP 1.4 rate select method. 
/* Can the source drive FEC on this pipe? (needed as DSC transport on DP) */
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (DISPLAY_VER(dev_priv) >= 12)
		return true;

	/* On ICL, FEC is supported on all but transcoder A */
	if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

/* FEC usable end to end: both the source pipe and the sink must support it */
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	/* On DP (not eDP), DSC requires FEC to be enabled first */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

/* Will the DFP see YCbCr 4:2:0 on the wire (native or converted)? */
static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state)
{
	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		 intel_dp->dfp.ycbcr_444_to_420);
}

/* TMDS clock the DFP must drive for the given bpc (4:2:0 halves it) */
static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state, int bpc)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;

	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
		clock /= 2;

	return clock;
}

static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

/* Deep color feasible on the HDMI DFP: sink caps AND TMDS clock range */
static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{
	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}

/*
 * Max link bpp for this crtc state, clamped by the DFP's max bpc and TMDS
 * clock range, and (for eDP without EDID bpc) by the VBT-provided bpp.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		/* Step down from deep color until the TMDS clock fits (stops at 8 bpc) */
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
*/ 1146 void 1147 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1148 struct intel_crtc_state *pipe_config, 1149 struct link_config_limits *limits) 1150 { 1151 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1152 1153 /* For DP Compliance we override the computed bpp for the pipe */ 1154 if (intel_dp->compliance.test_data.bpc != 0) { 1155 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1156 1157 limits->min_bpp = limits->max_bpp = bpp; 1158 pipe_config->dither_force_disable = bpp == 6 * 3; 1159 1160 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1161 } 1162 1163 /* Use values requested by Compliance Test Request */ 1164 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1165 int index; 1166 1167 /* Validate the compliance test data since max values 1168 * might have changed due to link train fallback. 1169 */ 1170 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1171 intel_dp->compliance.test_lane_count)) { 1172 index = intel_dp_rate_index(intel_dp->common_rates, 1173 intel_dp->num_common_rates, 1174 intel_dp->compliance.test_link_rate); 1175 if (index >= 0) 1176 limits->min_rate = limits->max_rate = 1177 intel_dp->compliance.test_link_rate; 1178 limits->min_lane_count = limits->max_lane_count = 1179 intel_dp->compliance.test_lane_count; 1180 } 1181 } 1182 } 1183 1184 /* Optimize link config in order: max bpp, min clock, min lanes */ 1185 static int 1186 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 1187 struct intel_crtc_state *pipe_config, 1188 const struct link_config_limits *limits) 1189 { 1190 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1191 int bpp, i, lane_count; 1192 int mode_rate, link_rate, link_avail; 1193 1194 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 1195 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1196 1197 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 1198 
output_bpp); 1199 1200 for (i = 0; i < intel_dp->num_common_rates; i++) { 1201 link_rate = intel_dp->common_rates[i]; 1202 if (link_rate < limits->min_rate || 1203 link_rate > limits->max_rate) 1204 continue; 1205 1206 for (lane_count = limits->min_lane_count; 1207 lane_count <= limits->max_lane_count; 1208 lane_count <<= 1) { 1209 link_avail = intel_dp_max_data_rate(link_rate, 1210 lane_count); 1211 1212 if (mode_rate <= link_avail) { 1213 pipe_config->lane_count = lane_count; 1214 pipe_config->pipe_bpp = bpp; 1215 pipe_config->port_clock = link_rate; 1216 1217 return 0; 1218 } 1219 } 1220 } 1221 } 1222 1223 return -EINVAL; 1224 } 1225 1226 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc) 1227 { 1228 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1229 int i, num_bpc; 1230 u8 dsc_bpc[3] = {0}; 1231 u8 dsc_max_bpc; 1232 1233 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1234 if (DISPLAY_VER(i915) >= 12) 1235 dsc_max_bpc = min_t(u8, 12, max_req_bpc); 1236 else 1237 dsc_max_bpc = min_t(u8, 10, max_req_bpc); 1238 1239 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 1240 dsc_bpc); 1241 for (i = 0; i < num_bpc; i++) { 1242 if (dsc_max_bpc >= dsc_bpc[i]) 1243 return dsc_bpc[i] * 3; 1244 } 1245 1246 return 0; 1247 } 1248 1249 #define DSC_SUPPORTED_VERSION_MIN 1 1250 1251 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 1252 struct intel_crtc_state *crtc_state) 1253 { 1254 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1255 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1256 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1257 u8 line_buf_depth; 1258 int ret; 1259 1260 /* 1261 * RC_MODEL_SIZE is currently a constant across all configurations. 1262 * 1263 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1264 * DP_DSC_RC_BUF_SIZE for this. 
1265 */ 1266 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1267 1268 /* 1269 * Slice Height of 8 works for all currently available panels. So start 1270 * with that if pic_height is an integral multiple of 8. Eventually add 1271 * logic to try multiple slice heights. 1272 */ 1273 if (vdsc_cfg->pic_height % 8 == 0) 1274 vdsc_cfg->slice_height = 8; 1275 else if (vdsc_cfg->pic_height % 4 == 0) 1276 vdsc_cfg->slice_height = 4; 1277 else 1278 vdsc_cfg->slice_height = 2; 1279 1280 ret = intel_dsc_compute_params(encoder, crtc_state); 1281 if (ret) 1282 return ret; 1283 1284 vdsc_cfg->dsc_version_major = 1285 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1286 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1287 vdsc_cfg->dsc_version_minor = 1288 min(DSC_SUPPORTED_VERSION_MIN, 1289 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1290 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 1291 1292 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1293 DP_DSC_RGB; 1294 1295 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 1296 if (!line_buf_depth) { 1297 drm_dbg_kms(&i915->drm, 1298 "DSC Sink Line Buffer Depth invalid\n"); 1299 return -EINVAL; 1300 } 1301 1302 if (vdsc_cfg->dsc_version_minor == 2) 1303 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 1304 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 1305 else 1306 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 
1307 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 1308 1309 vdsc_cfg->block_pred_enable = 1310 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1311 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1312 1313 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1314 } 1315 1316 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 1317 struct intel_crtc_state *pipe_config, 1318 struct drm_connector_state *conn_state, 1319 struct link_config_limits *limits) 1320 { 1321 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1322 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 1323 const struct drm_display_mode *adjusted_mode = 1324 &pipe_config->hw.adjusted_mode; 1325 int pipe_bpp; 1326 int ret; 1327 1328 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 1329 intel_dp_supports_fec(intel_dp, pipe_config); 1330 1331 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 1332 return -EINVAL; 1333 1334 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc); 1335 1336 /* Min Input BPC for ICL+ is 8 */ 1337 if (pipe_bpp < 8 * 3) { 1338 drm_dbg_kms(&dev_priv->drm, 1339 "No DSC support for less than 8bpc\n"); 1340 return -EINVAL; 1341 } 1342 1343 /* 1344 * For now enable DSC for max bpp, max link rate, max lane count. 1345 * Optimize this later for the minimum possible link rate/lane count 1346 * with DSC enabled for the requested mode. 
1347 */ 1348 pipe_config->pipe_bpp = pipe_bpp; 1349 pipe_config->port_clock = limits->max_rate; 1350 pipe_config->lane_count = limits->max_lane_count; 1351 1352 if (intel_dp_is_edp(intel_dp)) { 1353 pipe_config->dsc.compressed_bpp = 1354 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 1355 pipe_config->pipe_bpp); 1356 pipe_config->dsc.slice_count = 1357 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 1358 true); 1359 } else { 1360 u16 dsc_max_output_bpp; 1361 u8 dsc_dp_slice_count; 1362 1363 dsc_max_output_bpp = 1364 intel_dp_dsc_get_output_bpp(dev_priv, 1365 pipe_config->port_clock, 1366 pipe_config->lane_count, 1367 adjusted_mode->crtc_clock, 1368 adjusted_mode->crtc_hdisplay, 1369 pipe_config->bigjoiner, 1370 pipe_bpp); 1371 dsc_dp_slice_count = 1372 intel_dp_dsc_get_slice_count(intel_dp, 1373 adjusted_mode->crtc_clock, 1374 adjusted_mode->crtc_hdisplay, 1375 pipe_config->bigjoiner); 1376 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 1377 drm_dbg_kms(&dev_priv->drm, 1378 "Compressed BPP/Slice Count not supported\n"); 1379 return -EINVAL; 1380 } 1381 pipe_config->dsc.compressed_bpp = min_t(u16, 1382 dsc_max_output_bpp >> 4, 1383 pipe_config->pipe_bpp); 1384 pipe_config->dsc.slice_count = dsc_dp_slice_count; 1385 } 1386 1387 /* As of today we support DSC for only RGB */ 1388 if (intel_dp->force_dsc_bpp) { 1389 if (intel_dp->force_dsc_bpp >= 8 && 1390 intel_dp->force_dsc_bpp < pipe_bpp) { 1391 drm_dbg_kms(&dev_priv->drm, 1392 "DSC BPP forced to %d", 1393 intel_dp->force_dsc_bpp); 1394 pipe_config->dsc.compressed_bpp = 1395 intel_dp->force_dsc_bpp; 1396 } else { 1397 drm_dbg_kms(&dev_priv->drm, 1398 "Invalid DSC BPP %d", 1399 intel_dp->force_dsc_bpp); 1400 } 1401 } 1402 1403 /* 1404 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 1405 * is greater than the maximum Cdclock and if slice count is even 1406 * then we need to use 2 VDSC instances. 
1407 */ 1408 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq || 1409 pipe_config->bigjoiner) { 1410 if (pipe_config->dsc.slice_count < 2) { 1411 drm_dbg_kms(&dev_priv->drm, 1412 "Cannot split stream to use 2 VDSC instances\n"); 1413 return -EINVAL; 1414 } 1415 1416 pipe_config->dsc.dsc_split = true; 1417 } 1418 1419 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 1420 if (ret < 0) { 1421 drm_dbg_kms(&dev_priv->drm, 1422 "Cannot compute valid DSC parameters for Input Bpp = %d " 1423 "Compressed BPP = %d\n", 1424 pipe_config->pipe_bpp, 1425 pipe_config->dsc.compressed_bpp); 1426 return ret; 1427 } 1428 1429 pipe_config->dsc.compression_enable = true; 1430 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 1431 "Compressed Bpp = %d Slice Count = %d\n", 1432 pipe_config->pipe_bpp, 1433 pipe_config->dsc.compressed_bpp, 1434 pipe_config->dsc.slice_count); 1435 1436 return 0; 1437 } 1438 1439 static int 1440 intel_dp_compute_link_config(struct intel_encoder *encoder, 1441 struct intel_crtc_state *pipe_config, 1442 struct drm_connector_state *conn_state) 1443 { 1444 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1445 const struct drm_display_mode *adjusted_mode = 1446 &pipe_config->hw.adjusted_mode; 1447 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1448 struct link_config_limits limits; 1449 int common_len; 1450 int ret; 1451 1452 common_len = intel_dp_common_len_rate_limit(intel_dp, 1453 intel_dp->max_link_rate); 1454 1455 /* No common link rates between source and sink */ 1456 drm_WARN_ON(encoder->base.dev, common_len <= 0); 1457 1458 limits.min_rate = intel_dp->common_rates[0]; 1459 limits.max_rate = intel_dp->common_rates[common_len - 1]; 1460 1461 limits.min_lane_count = 1; 1462 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 1463 1464 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); 1465 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); 1466 1467 if 
(intel_dp->use_max_params) { 1468 /* 1469 * Use the maximum clock and number of lanes the eDP panel 1470 * advertizes being capable of in case the initial fast 1471 * optimal params failed us. The panels are generally 1472 * designed to support only a single clock and lane 1473 * configuration, and typically on older panels these 1474 * values correspond to the native resolution of the panel. 1475 */ 1476 limits.min_lane_count = limits.max_lane_count; 1477 limits.min_rate = limits.max_rate; 1478 } 1479 1480 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 1481 1482 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 1483 "max rate %d max bpp %d pixel clock %iKHz\n", 1484 limits.max_lane_count, limits.max_rate, 1485 limits.max_bpp, adjusted_mode->crtc_clock); 1486 1487 if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay, 1488 adjusted_mode->crtc_clock)) 1489 pipe_config->bigjoiner = true; 1490 1491 /* 1492 * Optimize for slow and wide for everything, because there are some 1493 * eDP 1.3 and 1.4 panels don't work well with fast and narrow. 1494 */ 1495 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 1496 1497 /* 1498 * Pipe joiner needs compression upto display12 due to BW limitation. DG2 1499 * onwards pipe joiner can be enabled without compression. 
1500 */ 1501 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 1502 if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 && 1503 pipe_config->bigjoiner)) { 1504 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 1505 conn_state, &limits); 1506 if (ret < 0) 1507 return ret; 1508 } 1509 1510 if (pipe_config->dsc.compression_enable) { 1511 drm_dbg_kms(&i915->drm, 1512 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 1513 pipe_config->lane_count, pipe_config->port_clock, 1514 pipe_config->pipe_bpp, 1515 pipe_config->dsc.compressed_bpp); 1516 1517 drm_dbg_kms(&i915->drm, 1518 "DP link rate required %i available %i\n", 1519 intel_dp_link_required(adjusted_mode->crtc_clock, 1520 pipe_config->dsc.compressed_bpp), 1521 intel_dp_max_data_rate(pipe_config->port_clock, 1522 pipe_config->lane_count)); 1523 } else { 1524 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 1525 pipe_config->lane_count, pipe_config->port_clock, 1526 pipe_config->pipe_bpp); 1527 1528 drm_dbg_kms(&i915->drm, 1529 "DP link rate required %i available %i\n", 1530 intel_dp_link_required(adjusted_mode->crtc_clock, 1531 pipe_config->pipe_bpp), 1532 intel_dp_max_data_rate(pipe_config->port_clock, 1533 pipe_config->lane_count)); 1534 } 1535 return 0; 1536 } 1537 1538 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 1539 const struct drm_connector_state *conn_state) 1540 { 1541 const struct intel_digital_connector_state *intel_conn_state = 1542 to_intel_digital_connector_state(conn_state); 1543 const struct drm_display_mode *adjusted_mode = 1544 &crtc_state->hw.adjusted_mode; 1545 1546 /* 1547 * Our YCbCr output is always limited range. 1548 * crtc_state->limited_color_range only applies to RGB, 1549 * and it must never be set for YCbCr or we risk setting 1550 * some conflicting bits in PIPECONF which will mess up 1551 * the colors on the monitor. 
1552 */ 1553 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 1554 return false; 1555 1556 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1557 /* 1558 * See: 1559 * CEA-861-E - 5.1 Default Encoding Parameters 1560 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1561 */ 1562 return crtc_state->pipe_bpp != 18 && 1563 drm_default_rgb_quant_range(adjusted_mode) == 1564 HDMI_QUANTIZATION_RANGE_LIMITED; 1565 } else { 1566 return intel_conn_state->broadcast_rgb == 1567 INTEL_BROADCAST_RGB_LIMITED; 1568 } 1569 } 1570 1571 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 1572 enum port port) 1573 { 1574 if (IS_G4X(dev_priv)) 1575 return false; 1576 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) 1577 return false; 1578 1579 return true; 1580 } 1581 1582 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 1583 const struct drm_connector_state *conn_state, 1584 struct drm_dp_vsc_sdp *vsc) 1585 { 1586 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1587 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1588 1589 /* 1590 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1591 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 1592 * Colorimetry Format indication. 
1593 */ 1594 vsc->revision = 0x5; 1595 vsc->length = 0x13; 1596 1597 /* DP 1.4a spec, Table 2-120 */ 1598 switch (crtc_state->output_format) { 1599 case INTEL_OUTPUT_FORMAT_YCBCR444: 1600 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 1601 break; 1602 case INTEL_OUTPUT_FORMAT_YCBCR420: 1603 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 1604 break; 1605 case INTEL_OUTPUT_FORMAT_RGB: 1606 default: 1607 vsc->pixelformat = DP_PIXELFORMAT_RGB; 1608 } 1609 1610 switch (conn_state->colorspace) { 1611 case DRM_MODE_COLORIMETRY_BT709_YCC: 1612 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1613 break; 1614 case DRM_MODE_COLORIMETRY_XVYCC_601: 1615 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 1616 break; 1617 case DRM_MODE_COLORIMETRY_XVYCC_709: 1618 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 1619 break; 1620 case DRM_MODE_COLORIMETRY_SYCC_601: 1621 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 1622 break; 1623 case DRM_MODE_COLORIMETRY_OPYCC_601: 1624 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 1625 break; 1626 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 1627 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 1628 break; 1629 case DRM_MODE_COLORIMETRY_BT2020_RGB: 1630 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 1631 break; 1632 case DRM_MODE_COLORIMETRY_BT2020_YCC: 1633 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 1634 break; 1635 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 1636 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 1637 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 1638 break; 1639 default: 1640 /* 1641 * RGB->YCBCR color conversion uses the BT.709 1642 * color space. 
1643 */ 1644 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1645 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1646 else 1647 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 1648 break; 1649 } 1650 1651 vsc->bpc = crtc_state->pipe_bpp / 3; 1652 1653 /* only RGB pixelformat supports 6 bpc */ 1654 drm_WARN_ON(&dev_priv->drm, 1655 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 1656 1657 /* all YCbCr are always limited range */ 1658 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 1659 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 1660 } 1661 1662 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 1663 struct intel_crtc_state *crtc_state, 1664 const struct drm_connector_state *conn_state) 1665 { 1666 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 1667 1668 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 1669 if (crtc_state->has_psr) 1670 return; 1671 1672 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 1673 return; 1674 1675 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 1676 vsc->sdp_type = DP_SDP_VSC; 1677 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1678 &crtc_state->infoframes.vsc); 1679 } 1680 1681 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 1682 const struct intel_crtc_state *crtc_state, 1683 const struct drm_connector_state *conn_state, 1684 struct drm_dp_vsc_sdp *vsc) 1685 { 1686 vsc->sdp_type = DP_SDP_VSC; 1687 1688 if (crtc_state->has_psr2) { 1689 if (intel_dp->psr.colorimetry_support && 1690 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 1691 /* [PSR2, +Colorimetry] */ 1692 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1693 vsc); 1694 } else { 1695 /* 1696 * [PSR2, -Colorimetry] 1697 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 1698 * 3D stereo + PSR/PSR2 + Y-coordinate. 
1699 */ 1700 vsc->revision = 0x4; 1701 vsc->length = 0xe; 1702 } 1703 } else { 1704 /* 1705 * [PSR1] 1706 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1707 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 1708 * higher). 1709 */ 1710 vsc->revision = 0x2; 1711 vsc->length = 0x8; 1712 } 1713 } 1714 1715 static void 1716 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 1717 struct intel_crtc_state *crtc_state, 1718 const struct drm_connector_state *conn_state) 1719 { 1720 int ret; 1721 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1722 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 1723 1724 if (!conn_state->hdr_output_metadata) 1725 return; 1726 1727 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 1728 1729 if (ret) { 1730 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 1731 return; 1732 } 1733 1734 crtc_state->infoframes.enable |= 1735 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 1736 } 1737 1738 int 1739 intel_dp_compute_config(struct intel_encoder *encoder, 1740 struct intel_crtc_state *pipe_config, 1741 struct drm_connector_state *conn_state) 1742 { 1743 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1744 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1745 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1746 enum port port = encoder->port; 1747 struct intel_connector *intel_connector = intel_dp->attached_connector; 1748 struct intel_digital_connector_state *intel_conn_state = 1749 to_intel_digital_connector_state(conn_state); 1750 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); 1751 int ret = 0, output_bpp; 1752 1753 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 1754 pipe_config->has_pch_encoder = true; 1755 1756 pipe_config->output_format = intel_dp_output_format(&intel_connector->base, 1757 adjusted_mode); 
1758 1759 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 1760 ret = intel_panel_fitting(pipe_config, conn_state); 1761 if (ret) 1762 return ret; 1763 } 1764 1765 if (!intel_dp_port_has_audio(dev_priv, port)) 1766 pipe_config->has_audio = false; 1767 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 1768 pipe_config->has_audio = intel_dp->has_audio; 1769 else 1770 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 1771 1772 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1773 ret = intel_panel_compute_config(intel_connector, adjusted_mode); 1774 if (ret) 1775 return ret; 1776 1777 ret = intel_panel_fitting(pipe_config, conn_state); 1778 if (ret) 1779 return ret; 1780 } 1781 1782 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1783 return -EINVAL; 1784 1785 if (HAS_GMCH(dev_priv) && 1786 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 1787 return -EINVAL; 1788 1789 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 1790 return -EINVAL; 1791 1792 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 1793 return -EINVAL; 1794 1795 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 1796 if (ret < 0) 1797 return ret; 1798 1799 pipe_config->limited_color_range = 1800 intel_dp_limited_color_range(pipe_config, conn_state); 1801 1802 if (pipe_config->dsc.compression_enable) 1803 output_bpp = pipe_config->dsc.compressed_bpp; 1804 else 1805 output_bpp = intel_dp_output_bpp(pipe_config->output_format, 1806 pipe_config->pipe_bpp); 1807 1808 if (intel_dp->mso_link_count) { 1809 int n = intel_dp->mso_link_count; 1810 int overlap = intel_dp->mso_pixel_overlap; 1811 1812 pipe_config->splitter.enable = true; 1813 pipe_config->splitter.link_count = n; 1814 pipe_config->splitter.pixel_overlap = overlap; 1815 1816 drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n", 1817 n, overlap); 1818 1819 adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap; 
1820 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap; 1821 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap; 1822 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap; 1823 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap; 1824 adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap; 1825 adjusted_mode->crtc_clock /= n; 1826 } 1827 1828 intel_link_compute_m_n(output_bpp, 1829 pipe_config->lane_count, 1830 adjusted_mode->crtc_clock, 1831 pipe_config->port_clock, 1832 &pipe_config->dp_m_n, 1833 constant_n, pipe_config->fec_enable); 1834 1835 /* FIXME: abstract this better */ 1836 if (pipe_config->splitter.enable) 1837 pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count; 1838 1839 if (!HAS_DDI(dev_priv)) 1840 g4x_dp_set_clock(encoder, pipe_config); 1841 1842 intel_vrr_compute_config(pipe_config, conn_state); 1843 intel_psr_compute_config(intel_dp, pipe_config, conn_state); 1844 intel_drrs_compute_config(intel_dp, pipe_config, output_bpp, 1845 constant_n); 1846 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 1847 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 1848 1849 return 0; 1850 } 1851 1852 void intel_dp_set_link_params(struct intel_dp *intel_dp, 1853 int link_rate, int lane_count) 1854 { 1855 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); 1856 intel_dp->link_trained = false; 1857 intel_dp->link_rate = link_rate; 1858 intel_dp->lane_count = lane_count; 1859 } 1860 1861 /* Enable backlight PWM and backlight PP control. 
*/ 1862 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 1863 const struct drm_connector_state *conn_state) 1864 { 1865 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 1866 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1867 1868 if (!intel_dp_is_edp(intel_dp)) 1869 return; 1870 1871 drm_dbg_kms(&i915->drm, "\n"); 1872 1873 intel_backlight_enable(crtc_state, conn_state); 1874 intel_pps_backlight_on(intel_dp); 1875 } 1876 1877 /* Disable backlight PP control and backlight PWM. */ 1878 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 1879 { 1880 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 1881 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1882 1883 if (!intel_dp_is_edp(intel_dp)) 1884 return; 1885 1886 drm_dbg_kms(&i915->drm, "\n"); 1887 1888 intel_pps_backlight_off(intel_dp); 1889 intel_backlight_disable(old_conn_state); 1890 } 1891 1892 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 1893 { 1894 /* 1895 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 1896 * be capable of signalling downstream hpd with a long pulse. 1897 * Whether or not that means D3 is safe to use is not clear, 1898 * but let's assume so until proven otherwise. 1899 * 1900 * FIXME should really check all downstream ports... 1901 */ 1902 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 1903 drm_dp_is_branch(intel_dp->dpcd) && 1904 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 1905 } 1906 1907 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 1908 const struct intel_crtc_state *crtc_state, 1909 bool enable) 1910 { 1911 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1912 int ret; 1913 1914 if (!crtc_state->dsc.compression_enable) 1915 return; 1916 1917 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 1918 enable ? 
DP_DECOMPRESSION_EN : 0); 1919 if (ret < 0) 1920 drm_dbg_kms(&i915->drm, 1921 "Failed to %s sink decompression state\n", 1922 enabledisable(enable)); 1923 } 1924 1925 static void 1926 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) 1927 { 1928 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1929 u8 oui[] = { 0x00, 0xaa, 0x01 }; 1930 u8 buf[3] = { 0 }; 1931 1932 /* 1933 * During driver init, we want to be careful and avoid changing the source OUI if it's 1934 * already set to what we want, so as to avoid clearing any state by accident 1935 */ 1936 if (careful) { 1937 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) 1938 drm_err(&i915->drm, "Failed to read source OUI\n"); 1939 1940 if (memcmp(oui, buf, sizeof(oui)) == 0) 1941 return; 1942 } 1943 1944 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) 1945 drm_err(&i915->drm, "Failed to write source OUI\n"); 1946 } 1947 1948 /* If the device supports it, try to set the power state appropriately */ 1949 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 1950 { 1951 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1952 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1953 int ret, i; 1954 1955 /* Should have a valid DPCD by this point */ 1956 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1957 return; 1958 1959 if (mode != DP_SET_POWER_D0) { 1960 if (downstream_hpd_needs_d0(intel_dp)) 1961 return; 1962 1963 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 1964 } else { 1965 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 1966 1967 lspcon_resume(dp_to_dig_port(intel_dp)); 1968 1969 /* Write the source OUI as early as possible */ 1970 if (intel_dp_is_edp(intel_dp)) 1971 intel_edp_init_source_oui(intel_dp, false); 1972 1973 /* 1974 * When turning on, we need to retry for 1ms to give the sink 1975 * time to wake up. 
1976 */ 1977 for (i = 0; i < 3; i++) { 1978 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 1979 if (ret == 1) 1980 break; 1981 msleep(1); 1982 } 1983 1984 if (ret == 1 && lspcon->active) 1985 lspcon_wait_pcon_mode(lspcon); 1986 } 1987 1988 if (ret != 1) 1989 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", 1990 encoder->base.base.id, encoder->base.name, 1991 mode == DP_SET_POWER_D0 ? "D0" : "D3"); 1992 } 1993 1994 static bool 1995 intel_dp_get_dpcd(struct intel_dp *intel_dp); 1996 1997 /** 1998 * intel_dp_sync_state - sync the encoder state during init/resume 1999 * @encoder: intel encoder to sync 2000 * @crtc_state: state for the CRTC connected to the encoder 2001 * 2002 * Sync any state stored in the encoder wrt. HW state during driver init 2003 * and system resume. 2004 */ 2005 void intel_dp_sync_state(struct intel_encoder *encoder, 2006 const struct intel_crtc_state *crtc_state) 2007 { 2008 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2009 2010 /* 2011 * Don't clobber DPCD if it's been already read out during output 2012 * setup (eDP) or detect. 2013 */ 2014 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2015 intel_dp_get_dpcd(intel_dp); 2016 2017 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 2018 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 2019 } 2020 2021 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, 2022 struct intel_crtc_state *crtc_state) 2023 { 2024 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2025 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2026 2027 /* 2028 * If BIOS has set an unsupported or non-standard link rate for some 2029 * reason force an encoder recompute and full modeset. 
2030 */ 2031 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates, 2032 crtc_state->port_clock) < 0) { 2033 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n"); 2034 crtc_state->uapi.connectors_changed = true; 2035 return false; 2036 } 2037 2038 /* 2039 * FIXME hack to force full modeset when DSC is being used. 2040 * 2041 * As long as we do not have full state readout and config comparison 2042 * of crtc_state->dsc, we have no way to ensure reliable fastset. 2043 * Remove once we have readout for DSC. 2044 */ 2045 if (crtc_state->dsc.compression_enable) { 2046 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n"); 2047 crtc_state->uapi.mode_changed = true; 2048 return false; 2049 } 2050 2051 if (CAN_PSR(intel_dp)) { 2052 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n"); 2053 crtc_state->uapi.mode_changed = true; 2054 return false; 2055 } 2056 2057 return true; 2058 } 2059 2060 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) 2061 { 2062 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2063 2064 /* Clear the cached register set to avoid using stale values */ 2065 2066 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); 2067 2068 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, 2069 intel_dp->pcon_dsc_dpcd, 2070 sizeof(intel_dp->pcon_dsc_dpcd)) < 0) 2071 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", 2072 DP_PCON_DSC_ENCODER); 2073 2074 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", 2075 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); 2076 } 2077 2078 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) 2079 { 2080 int bw_gbps[] = {9, 18, 24, 32, 40, 48}; 2081 int i; 2082 2083 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { 2084 if (frl_bw_mask & (1 << i)) 2085 return bw_gbps[i]; 2086 } 2087 return 0; 2088 } 2089 2090 static int intel_dp_pcon_set_frl_mask(int max_frl) 2091 { 2092 switch 
(max_frl) { 2093 case 48: 2094 return DP_PCON_FRL_BW_MASK_48GBPS; 2095 case 40: 2096 return DP_PCON_FRL_BW_MASK_40GBPS; 2097 case 32: 2098 return DP_PCON_FRL_BW_MASK_32GBPS; 2099 case 24: 2100 return DP_PCON_FRL_BW_MASK_24GBPS; 2101 case 18: 2102 return DP_PCON_FRL_BW_MASK_18GBPS; 2103 case 9: 2104 return DP_PCON_FRL_BW_MASK_9GBPS; 2105 } 2106 2107 return 0; 2108 } 2109 2110 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 2111 { 2112 struct intel_connector *intel_connector = intel_dp->attached_connector; 2113 struct drm_connector *connector = &intel_connector->base; 2114 int max_frl_rate; 2115 int max_lanes, rate_per_lane; 2116 int max_dsc_lanes, dsc_rate_per_lane; 2117 2118 max_lanes = connector->display_info.hdmi.max_lanes; 2119 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; 2120 max_frl_rate = max_lanes * rate_per_lane; 2121 2122 if (connector->display_info.hdmi.dsc_cap.v_1p2) { 2123 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; 2124 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; 2125 if (max_dsc_lanes && dsc_rate_per_lane) 2126 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); 2127 } 2128 2129 return max_frl_rate; 2130 } 2131 2132 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) 2133 { 2134 #define TIMEOUT_FRL_READY_MS 500 2135 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 2136 2137 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2138 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; 2139 u8 max_frl_bw_mask = 0, frl_trained_mask; 2140 bool is_active; 2141 2142 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2143 if (ret < 0) 2144 return ret; 2145 2146 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 2147 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); 2148 2149 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); 2150 drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw); 2151 
2152 max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw); 2153 2154 if (max_frl_bw <= 0) 2155 return -EINVAL; 2156 2157 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); 2158 if (ret < 0) 2159 return ret; 2160 /* Wait for PCON to be FRL Ready */ 2161 wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS); 2162 2163 if (!is_active) 2164 return -ETIMEDOUT; 2165 2166 max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); 2167 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, 2168 DP_PCON_ENABLE_SEQUENTIAL_LINK); 2169 if (ret < 0) 2170 return ret; 2171 ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, 2172 DP_PCON_FRL_LINK_TRAIN_NORMAL); 2173 if (ret < 0) 2174 return ret; 2175 ret = drm_dp_pcon_frl_enable(&intel_dp->aux); 2176 if (ret < 0) 2177 return ret; 2178 /* 2179 * Wait for FRL to be completed 2180 * Check if the HDMI Link is up and active. 2181 */ 2182 wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS); 2183 2184 if (!is_active) 2185 return -ETIMEDOUT; 2186 2187 /* Verify HDMI Link configuration shows FRL Mode */ 2188 if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != 2189 DP_PCON_HDMI_MODE_FRL) { 2190 drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); 2191 return -EINVAL; 2192 } 2193 drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); 2194 2195 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); 2196 intel_dp->frl.is_trained = true; 2197 drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); 2198 2199 return 0; 2200 } 2201 2202 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) 2203 { 2204 if (drm_dp_is_branch(intel_dp->dpcd) && 2205 intel_dp->has_hdmi_sink && 2206 intel_dp_hdmi_sink_max_frl(intel_dp) > 0) 2207 return true; 2208 2209 return false; 2210 } 2211 2212 void 
intel_dp_check_frl_training(struct intel_dp *intel_dp) 2213 { 2214 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2215 2216 /* 2217 * Always go for FRL training if: 2218 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7) 2219 * -sink is HDMI2.1 2220 */ 2221 if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) || 2222 !intel_dp_is_hdmi_2_1_sink(intel_dp) || 2223 intel_dp->frl.is_trained) 2224 return; 2225 2226 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { 2227 int ret, mode; 2228 2229 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); 2230 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2231 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); 2232 2233 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) 2234 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n"); 2235 } else { 2236 drm_dbg(&dev_priv->drm, "FRL training Completed\n"); 2237 } 2238 } 2239 2240 static int 2241 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) 2242 { 2243 int vactive = crtc_state->hw.adjusted_mode.vdisplay; 2244 2245 return intel_hdmi_dsc_get_slice_height(vactive); 2246 } 2247 2248 static int 2249 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, 2250 const struct intel_crtc_state *crtc_state) 2251 { 2252 struct intel_connector *intel_connector = intel_dp->attached_connector; 2253 struct drm_connector *connector = &intel_connector->base; 2254 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; 2255 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; 2256 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); 2257 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); 2258 2259 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, 2260 pcon_max_slice_width, 2261 hdmi_max_slices, hdmi_throughput); 2262 } 2263 2264 static int 2265 
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, 2266 const struct intel_crtc_state *crtc_state, 2267 int num_slices, int slice_width) 2268 { 2269 struct intel_connector *intel_connector = intel_dp->attached_connector; 2270 struct drm_connector *connector = &intel_connector->base; 2271 int output_format = crtc_state->output_format; 2272 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; 2273 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); 2274 int hdmi_max_chunk_bytes = 2275 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; 2276 2277 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, 2278 num_slices, output_format, hdmi_all_bpp, 2279 hdmi_max_chunk_bytes); 2280 } 2281 2282 void 2283 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, 2284 const struct intel_crtc_state *crtc_state) 2285 { 2286 u8 pps_param[6]; 2287 int slice_height; 2288 int slice_width; 2289 int num_slices; 2290 int bits_per_pixel; 2291 int ret; 2292 struct intel_connector *intel_connector = intel_dp->attached_connector; 2293 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2294 struct drm_connector *connector; 2295 bool hdmi_is_dsc_1_2; 2296 2297 if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) 2298 return; 2299 2300 if (!intel_connector) 2301 return; 2302 connector = &intel_connector->base; 2303 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; 2304 2305 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || 2306 !hdmi_is_dsc_1_2) 2307 return; 2308 2309 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); 2310 if (!slice_height) 2311 return; 2312 2313 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); 2314 if (!num_slices) 2315 return; 2316 2317 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, 2318 num_slices); 2319 2320 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, 2321 num_slices, slice_width); 2322 if (!bits_per_pixel) 2323 return; 2324 2325 
pps_param[0] = slice_height & 0xFF; 2326 pps_param[1] = slice_height >> 8; 2327 pps_param[2] = slice_width & 0xFF; 2328 pps_param[3] = slice_width >> 8; 2329 pps_param[4] = bits_per_pixel & 0xFF; 2330 pps_param[5] = (bits_per_pixel >> 8) & 0x3; 2331 2332 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); 2333 if (ret < 0) 2334 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); 2335 } 2336 2337 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 2338 const struct intel_crtc_state *crtc_state) 2339 { 2340 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2341 u8 tmp; 2342 2343 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) 2344 return; 2345 2346 if (!drm_dp_is_branch(intel_dp->dpcd)) 2347 return; 2348 2349 tmp = intel_dp->has_hdmi_sink ? 2350 DP_HDMI_DVI_OUTPUT_CONFIG : 0; 2351 2352 if (drm_dp_dpcd_writeb(&intel_dp->aux, 2353 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 2354 drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n", 2355 enabledisable(intel_dp->has_hdmi_sink)); 2356 2357 tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 2358 intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 2359 2360 if (drm_dp_dpcd_writeb(&intel_dp->aux, 2361 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 2362 drm_dbg_kms(&i915->drm, 2363 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", 2364 enabledisable(intel_dp->dfp.ycbcr_444_to_420)); 2365 2366 tmp = 0; 2367 if (intel_dp->dfp.rgb_to_ycbcr) { 2368 bool bt2020, bt709; 2369 2370 /* 2371 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only 2372 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default. 
2373 * 2374 */ 2375 tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE; 2376 2377 bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 2378 intel_dp->downstream_ports, 2379 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); 2380 bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 2381 intel_dp->downstream_ports, 2382 DP_DS_HDMI_BT709_RGB_YCBCR_CONV); 2383 switch (crtc_state->infoframes.vsc.colorimetry) { 2384 case DP_COLORIMETRY_BT2020_RGB: 2385 case DP_COLORIMETRY_BT2020_YCC: 2386 if (bt2020) 2387 tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE; 2388 break; 2389 case DP_COLORIMETRY_BT709_YCC: 2390 case DP_COLORIMETRY_XVYCC_709: 2391 if (bt709) 2392 tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE; 2393 break; 2394 default: 2395 break; 2396 } 2397 } 2398 2399 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 2400 drm_dbg_kms(&i915->drm, 2401 "Failed to %s protocol converter RGB->YCbCr conversion mode\n", 2402 enabledisable(tmp)); 2403 } 2404 2405 2406 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 2407 { 2408 u8 dprx = 0; 2409 2410 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 2411 &dprx) != 1) 2412 return false; 2413 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 2414 } 2415 2416 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 2417 { 2418 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2419 2420 /* 2421 * Clear the cached register set to avoid using stale values 2422 * for the sinks that do not support DSC. 
2423 */ 2424 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 2425 2426 /* Clear fec_capable to avoid using stale values */ 2427 intel_dp->fec_capable = 0; 2428 2429 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 2430 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 2431 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 2432 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 2433 intel_dp->dsc_dpcd, 2434 sizeof(intel_dp->dsc_dpcd)) < 0) 2435 drm_err(&i915->drm, 2436 "Failed to read DPCD register 0x%x\n", 2437 DP_DSC_SUPPORT); 2438 2439 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 2440 (int)sizeof(intel_dp->dsc_dpcd), 2441 intel_dp->dsc_dpcd); 2442 2443 /* FEC is supported only on DP 1.4 */ 2444 if (!intel_dp_is_edp(intel_dp) && 2445 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 2446 &intel_dp->fec_capable) < 0) 2447 drm_err(&i915->drm, 2448 "Failed to read FEC DPCD register\n"); 2449 2450 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 2451 intel_dp->fec_capable); 2452 } 2453 } 2454 2455 static void intel_edp_mso_mode_fixup(struct intel_connector *connector, 2456 struct drm_display_mode *mode) 2457 { 2458 struct intel_dp *intel_dp = intel_attached_dp(connector); 2459 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2460 int n = intel_dp->mso_link_count; 2461 int overlap = intel_dp->mso_pixel_overlap; 2462 2463 if (!mode || !n) 2464 return; 2465 2466 mode->hdisplay = (mode->hdisplay - overlap) * n; 2467 mode->hsync_start = (mode->hsync_start - overlap) * n; 2468 mode->hsync_end = (mode->hsync_end - overlap) * n; 2469 mode->htotal = (mode->htotal - overlap) * n; 2470 mode->clock *= n; 2471 2472 drm_mode_set_name(mode); 2473 2474 drm_dbg_kms(&i915->drm, 2475 "[CONNECTOR:%d:%s] using generated MSO mode: ", 2476 connector->base.base.id, connector->base.name); 2477 drm_mode_debug_printmodeline(mode); 2478 } 2479 2480 static void intel_edp_mso_init(struct intel_dp *intel_dp) 2481 { 2482 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2483 struct 
intel_connector *connector = intel_dp->attached_connector; 2484 struct drm_display_info *info = &connector->base.display_info; 2485 u8 mso; 2486 2487 if (intel_dp->edp_dpcd[0] < DP_EDP_14) 2488 return; 2489 2490 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { 2491 drm_err(&i915->drm, "Failed to read MSO cap\n"); 2492 return; 2493 } 2494 2495 /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ 2496 mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; 2497 if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { 2498 drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso); 2499 mso = 0; 2500 } 2501 2502 if (mso) { 2503 drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n", 2504 mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso, 2505 info->mso_pixel_overlap); 2506 if (!HAS_MSO(i915)) { 2507 drm_err(&i915->drm, "No source MSO support, disabling\n"); 2508 mso = 0; 2509 } 2510 } 2511 2512 intel_dp->mso_link_count = mso; 2513 intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0; 2514 } 2515 2516 static bool 2517 intel_edp_init_dpcd(struct intel_dp *intel_dp) 2518 { 2519 struct drm_i915_private *dev_priv = 2520 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 2521 2522 /* this function is meant to be called only once */ 2523 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 2524 2525 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 2526 return false; 2527 2528 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 2529 drm_dp_is_branch(intel_dp->dpcd)); 2530 2531 /* 2532 * Read the eDP display control registers. 2533 * 2534 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 2535 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 2536 * set, but require eDP 1.4+ detection (e.g. for supported link rates 2537 * method). The display control registers should read zero if they're 2538 * not supported anyway. 
2539 */ 2540 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 2541 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 2542 sizeof(intel_dp->edp_dpcd)) { 2543 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 2544 (int)sizeof(intel_dp->edp_dpcd), 2545 intel_dp->edp_dpcd); 2546 2547 intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; 2548 } 2549 2550 /* 2551 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 2552 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 2553 */ 2554 intel_psr_init_dpcd(intel_dp); 2555 2556 /* Read the eDP 1.4+ supported link rates. */ 2557 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 2558 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 2559 int i; 2560 2561 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 2562 sink_rates, sizeof(sink_rates)); 2563 2564 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 2565 int val = le16_to_cpu(sink_rates[i]); 2566 2567 if (val == 0) 2568 break; 2569 2570 /* Value read multiplied by 200kHz gives the per-lane 2571 * link rate in kHz. The source rates are, however, 2572 * stored in terms of LS_Clk kHz. The full conversion 2573 * back to symbols is 2574 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 2575 */ 2576 intel_dp->sink_rates[i] = (val * 200) / 10; 2577 } 2578 intel_dp->num_sink_rates = i; 2579 } 2580 2581 /* 2582 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 2583 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
2584 */ 2585 if (intel_dp->num_sink_rates) 2586 intel_dp->use_rate_select = true; 2587 else 2588 intel_dp_set_sink_rates(intel_dp); 2589 2590 intel_dp_set_common_rates(intel_dp); 2591 2592 /* Read the eDP DSC DPCD registers */ 2593 if (DISPLAY_VER(dev_priv) >= 10) 2594 intel_dp_get_dsc_sink_cap(intel_dp); 2595 2596 /* 2597 * If needed, program our source OUI so we can make various Intel-specific AUX services 2598 * available (such as HDR backlight controls) 2599 */ 2600 intel_edp_init_source_oui(intel_dp, true); 2601 2602 return true; 2603 } 2604 2605 static bool 2606 intel_dp_has_sink_count(struct intel_dp *intel_dp) 2607 { 2608 if (!intel_dp->attached_connector) 2609 return false; 2610 2611 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 2612 intel_dp->dpcd, 2613 &intel_dp->desc); 2614 } 2615 2616 static bool 2617 intel_dp_get_dpcd(struct intel_dp *intel_dp) 2618 { 2619 int ret; 2620 2621 if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) 2622 return false; 2623 2624 /* 2625 * Don't clobber cached eDP rates. Also skip re-reading 2626 * the OUI/ID since we know it won't change. 2627 */ 2628 if (!intel_dp_is_edp(intel_dp)) { 2629 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 2630 drm_dp_is_branch(intel_dp->dpcd)); 2631 2632 intel_dp_set_sink_rates(intel_dp); 2633 intel_dp_set_common_rates(intel_dp); 2634 } 2635 2636 if (intel_dp_has_sink_count(intel_dp)) { 2637 ret = drm_dp_read_sink_count(&intel_dp->aux); 2638 if (ret < 0) 2639 return false; 2640 2641 /* 2642 * Sink count can change between short pulse hpd hence 2643 * a member variable in intel_dp will track any changes 2644 * between short pulse interrupts. 2645 */ 2646 intel_dp->sink_count = ret; 2647 2648 /* 2649 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 2650 * a dongle is present but no display. Unless we require to know 2651 * if a dongle is present or not, we don't need to update 2652 * downstream port information. 
So, an early return here saves 2653 * time from performing other operations which are not required. 2654 */ 2655 if (!intel_dp->sink_count) 2656 return false; 2657 } 2658 2659 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 2660 intel_dp->downstream_ports) == 0; 2661 } 2662 2663 static bool 2664 intel_dp_can_mst(struct intel_dp *intel_dp) 2665 { 2666 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2667 2668 return i915->params.enable_dp_mst && 2669 intel_dp_mst_source_support(intel_dp) && 2670 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 2671 } 2672 2673 static void 2674 intel_dp_configure_mst(struct intel_dp *intel_dp) 2675 { 2676 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2677 struct intel_encoder *encoder = 2678 &dp_to_dig_port(intel_dp)->base; 2679 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 2680 2681 drm_dbg_kms(&i915->drm, 2682 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 2683 encoder->base.base.id, encoder->base.name, 2684 yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst), 2685 yesno(i915->params.enable_dp_mst)); 2686 2687 if (!intel_dp_mst_source_support(intel_dp)) 2688 return; 2689 2690 intel_dp->is_mst = sink_can_mst && 2691 i915->params.enable_dp_mst; 2692 2693 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 2694 intel_dp->is_mst); 2695 } 2696 2697 static bool 2698 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 2699 { 2700 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 2701 sink_irq_vector, DP_DPRX_ESI_LEN) == 2702 DP_DPRX_ESI_LEN; 2703 } 2704 2705 bool 2706 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 2707 const struct drm_connector_state *conn_state) 2708 { 2709 /* 2710 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 2711 * of Color Encoding Format and Content Color Gamut], in order to 2712 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 
2713 */ 2714 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2715 return true; 2716 2717 switch (conn_state->colorspace) { 2718 case DRM_MODE_COLORIMETRY_SYCC_601: 2719 case DRM_MODE_COLORIMETRY_OPYCC_601: 2720 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2721 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2722 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2723 return true; 2724 default: 2725 break; 2726 } 2727 2728 return false; 2729 } 2730 2731 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 2732 struct dp_sdp *sdp, size_t size) 2733 { 2734 size_t length = sizeof(struct dp_sdp); 2735 2736 if (size < length) 2737 return -ENOSPC; 2738 2739 memset(sdp, 0, size); 2740 2741 /* 2742 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 2743 * VSC SDP Header Bytes 2744 */ 2745 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 2746 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 2747 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 2748 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 2749 2750 /* 2751 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 2752 * per DP 1.4a spec. 
2753 */ 2754 if (vsc->revision != 0x5) 2755 goto out; 2756 2757 /* VSC SDP Payload for DB16 through DB18 */ 2758 /* Pixel Encoding and Colorimetry Formats */ 2759 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 2760 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 2761 2762 switch (vsc->bpc) { 2763 case 6: 2764 /* 6bpc: 0x0 */ 2765 break; 2766 case 8: 2767 sdp->db[17] = 0x1; /* DB17[3:0] */ 2768 break; 2769 case 10: 2770 sdp->db[17] = 0x2; 2771 break; 2772 case 12: 2773 sdp->db[17] = 0x3; 2774 break; 2775 case 16: 2776 sdp->db[17] = 0x4; 2777 break; 2778 default: 2779 MISSING_CASE(vsc->bpc); 2780 break; 2781 } 2782 /* Dynamic Range and Component Bit Depth */ 2783 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 2784 sdp->db[17] |= 0x80; /* DB17[7] */ 2785 2786 /* Content Type */ 2787 sdp->db[18] = vsc->content_type & 0x7; 2788 2789 out: 2790 return length; 2791 } 2792 2793 static ssize_t 2794 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 2795 struct dp_sdp *sdp, 2796 size_t size) 2797 { 2798 size_t length = sizeof(struct dp_sdp); 2799 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 2800 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 2801 ssize_t len; 2802 2803 if (size < length) 2804 return -ENOSPC; 2805 2806 memset(sdp, 0, size); 2807 2808 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 2809 if (len < 0) { 2810 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 2811 return -ENOSPC; 2812 } 2813 2814 if (len != infoframe_size) { 2815 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 2816 return -ENOSPC; 2817 } 2818 2819 /* 2820 * Set up the infoframe sdp packet for HDR static metadata. 
2821 * Prepare VSC Header for SU as per DP 1.4a spec, 2822 * Table 2-100 and Table 2-101 2823 */ 2824 2825 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 2826 sdp->sdp_header.HB0 = 0; 2827 /* 2828 * Packet Type 80h + Non-audio INFOFRAME Type value 2829 * HDMI_INFOFRAME_TYPE_DRM: 0x87 2830 * - 80h + Non-audio INFOFRAME Type value 2831 * - InfoFrame Type: 0x07 2832 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 2833 */ 2834 sdp->sdp_header.HB1 = drm_infoframe->type; 2835 /* 2836 * Least Significant Eight Bits of (Data Byte Count – 1) 2837 * infoframe_size - 1 2838 */ 2839 sdp->sdp_header.HB2 = 0x1D; 2840 /* INFOFRAME SDP Version Number */ 2841 sdp->sdp_header.HB3 = (0x13 << 2); 2842 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 2843 sdp->db[0] = drm_infoframe->version; 2844 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 2845 sdp->db[1] = drm_infoframe->length; 2846 /* 2847 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 2848 * HDMI_INFOFRAME_HEADER_SIZE 2849 */ 2850 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 2851 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 2852 HDMI_DRM_INFOFRAME_SIZE); 2853 2854 /* 2855 * Size of DP infoframe sdp packet for HDR static metadata consists of 2856 * - DP SDP Header(struct dp_sdp_header): 4 bytes 2857 * - Two Data Blocks: 2 bytes 2858 * CTA Header Byte2 (INFOFRAME Version Number) 2859 * CTA Header Byte3 (Length of INFOFRAME) 2860 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 2861 * 2862 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 2863 * infoframe size. But GEN11+ has larger than that size, write_infoframe 2864 * will pad rest of the size. 
2865 */ 2866 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 2867 } 2868 2869 static void intel_write_dp_sdp(struct intel_encoder *encoder, 2870 const struct intel_crtc_state *crtc_state, 2871 unsigned int type) 2872 { 2873 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2874 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2875 struct dp_sdp sdp = {}; 2876 ssize_t len; 2877 2878 if ((crtc_state->infoframes.enable & 2879 intel_hdmi_infoframe_enable(type)) == 0) 2880 return; 2881 2882 switch (type) { 2883 case DP_SDP_VSC: 2884 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 2885 sizeof(sdp)); 2886 break; 2887 case HDMI_PACKET_TYPE_GAMUT_METADATA: 2888 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 2889 &sdp, sizeof(sdp)); 2890 break; 2891 default: 2892 MISSING_CASE(type); 2893 return; 2894 } 2895 2896 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2897 return; 2898 2899 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 2900 } 2901 2902 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 2903 const struct intel_crtc_state *crtc_state, 2904 const struct drm_dp_vsc_sdp *vsc) 2905 { 2906 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2907 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2908 struct dp_sdp sdp = {}; 2909 ssize_t len; 2910 2911 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 2912 2913 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2914 return; 2915 2916 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 2917 &sdp, len); 2918 } 2919 2920 void intel_dp_set_infoframes(struct intel_encoder *encoder, 2921 bool enable, 2922 const struct intel_crtc_state *crtc_state, 2923 const struct drm_connector_state *conn_state) 2924 { 2925 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2926 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 2927 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | 
VIDEO_DIP_ENABLE_GCP_HSW | 2928 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 2929 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 2930 u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; 2931 2932 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 2933 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 2934 if (!crtc_state->has_psr) 2935 val &= ~VIDEO_DIP_ENABLE_VSC_HSW; 2936 2937 intel_de_write(dev_priv, reg, val); 2938 intel_de_posting_read(dev_priv, reg); 2939 2940 if (!enable) 2941 return; 2942 2943 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 2944 if (!crtc_state->has_psr) 2945 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 2946 2947 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 2948 } 2949 2950 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 2951 const void *buffer, size_t size) 2952 { 2953 const struct dp_sdp *sdp = buffer; 2954 2955 if (size < sizeof(struct dp_sdp)) 2956 return -EINVAL; 2957 2958 memset(vsc, 0, sizeof(*vsc)); 2959 2960 if (sdp->sdp_header.HB0 != 0) 2961 return -EINVAL; 2962 2963 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 2964 return -EINVAL; 2965 2966 vsc->sdp_type = sdp->sdp_header.HB1; 2967 vsc->revision = sdp->sdp_header.HB2; 2968 vsc->length = sdp->sdp_header.HB3; 2969 2970 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 2971 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 2972 /* 2973 * - HB2 = 0x2, HB3 = 0x8 2974 * VSC SDP supporting 3D stereo + PSR 2975 * - HB2 = 0x4, HB3 = 0xe 2976 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 2977 * first scan line of the SU region (applies to eDP v1.4b 2978 * and higher). 2979 */ 2980 return 0; 2981 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 2982 /* 2983 * - HB2 = 0x5, HB3 = 0x13 2984 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 2985 * Format. 
2986 */ 2987 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 2988 vsc->colorimetry = sdp->db[16] & 0xf; 2989 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 2990 2991 switch (sdp->db[17] & 0x7) { 2992 case 0x0: 2993 vsc->bpc = 6; 2994 break; 2995 case 0x1: 2996 vsc->bpc = 8; 2997 break; 2998 case 0x2: 2999 vsc->bpc = 10; 3000 break; 3001 case 0x3: 3002 vsc->bpc = 12; 3003 break; 3004 case 0x4: 3005 vsc->bpc = 16; 3006 break; 3007 default: 3008 MISSING_CASE(sdp->db[17] & 0x7); 3009 return -EINVAL; 3010 } 3011 3012 vsc->content_type = sdp->db[18] & 0x7; 3013 } else { 3014 return -EINVAL; 3015 } 3016 3017 return 0; 3018 } 3019 3020 static int 3021 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 3022 const void *buffer, size_t size) 3023 { 3024 int ret; 3025 3026 const struct dp_sdp *sdp = buffer; 3027 3028 if (size < sizeof(struct dp_sdp)) 3029 return -EINVAL; 3030 3031 if (sdp->sdp_header.HB0 != 0) 3032 return -EINVAL; 3033 3034 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 3035 return -EINVAL; 3036 3037 /* 3038 * Least Significant Eight Bits of (Data Byte Count – 1) 3039 * 1Dh (i.e., Data Byte Count = 30 bytes). 3040 */ 3041 if (sdp->sdp_header.HB2 != 0x1D) 3042 return -EINVAL; 3043 3044 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
*/ 3045 if ((sdp->sdp_header.HB3 & 0x3) != 0) 3046 return -EINVAL; 3047 3048 /* INFOFRAME SDP Version Number */ 3049 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 3050 return -EINVAL; 3051 3052 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 3053 if (sdp->db[0] != 1) 3054 return -EINVAL; 3055 3056 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 3057 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 3058 return -EINVAL; 3059 3060 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 3061 HDMI_DRM_INFOFRAME_SIZE); 3062 3063 return ret; 3064 } 3065 3066 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 3067 struct intel_crtc_state *crtc_state, 3068 struct drm_dp_vsc_sdp *vsc) 3069 { 3070 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3071 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3072 unsigned int type = DP_SDP_VSC; 3073 struct dp_sdp sdp = {}; 3074 int ret; 3075 3076 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 3077 if (crtc_state->has_psr) 3078 return; 3079 3080 if ((crtc_state->infoframes.enable & 3081 intel_hdmi_infoframe_enable(type)) == 0) 3082 return; 3083 3084 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 3085 3086 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 3087 3088 if (ret) 3089 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 3090 } 3091 3092 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 3093 struct intel_crtc_state *crtc_state, 3094 struct hdmi_drm_infoframe *drm_infoframe) 3095 { 3096 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3097 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3098 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 3099 struct dp_sdp sdp = {}; 3100 int ret; 3101 3102 if ((crtc_state->infoframes.enable & 3103 intel_hdmi_infoframe_enable(type)) == 0) 3104 return; 3105 3106 dig_port->read_infoframe(encoder, 
				 crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

/*
 * Read back a secondary data packet of @type from the hardware into the
 * matching field of @crtc_state->infoframes for state readout.
 */
void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

/*
 * Handle a LINK_TRAINING automated test request: read the requested lane
 * count and link rate from the sink, validate them against our
 * capabilities and stash them for the subsequent compliance modeset.
 *
 * Returns DP_TEST_ACK if the request was accepted, DP_TEST_NAK otherwise.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

/*
 * Handle a TEST_PATTERN (video pattern) automated test request
 * (DP CTS 3.1.5): read and validate the requested pattern, geometry and
 * pixel format, and record them for the compliance modeset. Only the
 * RGB VESA-range color-ramp pattern at 6 or 8 bpc is supported.
 *
 * Returns DP_TEST_ACK if the request was accepted, DP_TEST_NAK otherwise.
 */
static u8
intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

/*
 * Handle an EDID_READ automated test request (DP CTS 1.2 Core r1.1
 * 4.2.2.x): on a clean EDID read write the last block's checksum back to
 * the sink and request the preferred resolution, otherwise fall back to
 * failsafe mode.
 */
static u8
intel_dp_autotest_edid(struct intel_dp *intel_dp) 3237 { 3238 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3239 u8 test_result = DP_TEST_ACK; 3240 struct intel_connector *intel_connector = intel_dp->attached_connector; 3241 struct drm_connector *connector = &intel_connector->base; 3242 3243 if (intel_connector->detect_edid == NULL || 3244 connector->edid_corrupt || 3245 intel_dp->aux.i2c_defer_count > 6) { 3246 /* Check EDID read for NACKs, DEFERs and corruption 3247 * (DP CTS 1.2 Core r1.1) 3248 * 4.2.2.4 : Failed EDID read, I2C_NAK 3249 * 4.2.2.5 : Failed EDID read, I2C_DEFER 3250 * 4.2.2.6 : EDID corruption detected 3251 * Use failsafe mode for all cases 3252 */ 3253 if (intel_dp->aux.i2c_nack_count > 0 || 3254 intel_dp->aux.i2c_defer_count > 0) 3255 drm_dbg_kms(&i915->drm, 3256 "EDID read had %d NACKs, %d DEFERs\n", 3257 intel_dp->aux.i2c_nack_count, 3258 intel_dp->aux.i2c_defer_count); 3259 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 3260 } else { 3261 struct edid *block = intel_connector->detect_edid; 3262 3263 /* We have to write the checksum 3264 * of the last block read 3265 */ 3266 block += intel_connector->detect_edid->extensions; 3267 3268 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 3269 block->checksum) <= 0) 3270 drm_dbg_kms(&i915->drm, 3271 "Failed to write EDID checksum\n"); 3272 3273 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 3274 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 3275 } 3276 3277 /* Set test active flag here so userspace doesn't interrupt things */ 3278 intel_dp->compliance.test_active = true; 3279 3280 return test_result; 3281 } 3282 3283 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 3284 const struct intel_crtc_state *crtc_state) 3285 { 3286 struct drm_i915_private *dev_priv = 3287 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 3288 struct drm_dp_phy_test_params *data = 3289 &intel_dp->compliance.test_data.phytest; 3290 
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3291 enum pipe pipe = crtc->pipe; 3292 u32 pattern_val; 3293 3294 switch (data->phy_pattern) { 3295 case DP_PHY_TEST_PATTERN_NONE: 3296 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 3297 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 3298 break; 3299 case DP_PHY_TEST_PATTERN_D10_2: 3300 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 3301 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3302 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 3303 break; 3304 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 3305 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 3306 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3307 DDI_DP_COMP_CTL_ENABLE | 3308 DDI_DP_COMP_CTL_SCRAMBLED_0); 3309 break; 3310 case DP_PHY_TEST_PATTERN_PRBS7: 3311 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 3312 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3313 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 3314 break; 3315 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 3316 /* 3317 * FIXME: Ideally pattern should come from DPCD 0x250. As 3318 * current firmware of DPR-100 could not set it, so hardcoding 3319 * now for complaince test. 3320 */ 3321 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 3322 pattern_val = 0x3e0f83e0; 3323 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 3324 pattern_val = 0x0f83e0f8; 3325 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 3326 pattern_val = 0x0000f83e; 3327 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 3328 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3329 DDI_DP_COMP_CTL_ENABLE | 3330 DDI_DP_COMP_CTL_CUSTOM80); 3331 break; 3332 case DP_PHY_TEST_PATTERN_CP2520: 3333 /* 3334 * FIXME: Ideally pattern should come from DPCD 0x24A. As 3335 * current firmware of DPR-100 could not set it, so hardcoding 3336 * now for complaince test. 
3337 */ 3338 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 3339 pattern_val = 0xFB; 3340 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3341 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 3342 pattern_val); 3343 break; 3344 default: 3345 WARN(1, "Invalid Phy Test Pattern\n"); 3346 } 3347 } 3348 3349 static void 3350 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, 3351 const struct intel_crtc_state *crtc_state) 3352 { 3353 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3354 struct drm_device *dev = dig_port->base.base.dev; 3355 struct drm_i915_private *dev_priv = to_i915(dev); 3356 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3357 enum pipe pipe = crtc->pipe; 3358 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3359 3360 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3361 TRANS_DDI_FUNC_CTL(pipe)); 3362 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3363 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3364 3365 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 3366 TGL_TRANS_DDI_PORT_MASK); 3367 trans_conf_value &= ~PIPECONF_ENABLE; 3368 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 3369 3370 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3371 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3372 trans_ddi_func_ctl_value); 3373 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3374 } 3375 3376 static void 3377 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, 3378 const struct intel_crtc_state *crtc_state) 3379 { 3380 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3381 struct drm_device *dev = dig_port->base.base.dev; 3382 struct drm_i915_private *dev_priv = to_i915(dev); 3383 enum port port = dig_port->base.port; 3384 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3385 enum pipe pipe = crtc->pipe; 3386 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3387 3388 
trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3389 TRANS_DDI_FUNC_CTL(pipe)); 3390 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3391 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3392 3393 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 3394 TGL_TRANS_DDI_SELECT_PORT(port); 3395 trans_conf_value |= PIPECONF_ENABLE; 3396 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 3397 3398 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3399 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3400 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3401 trans_ddi_func_ctl_value); 3402 } 3403 3404 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 3405 const struct intel_crtc_state *crtc_state) 3406 { 3407 struct drm_dp_phy_test_params *data = 3408 &intel_dp->compliance.test_data.phytest; 3409 u8 link_status[DP_LINK_STATUS_SIZE]; 3410 3411 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 3412 link_status) < 0) { 3413 DRM_DEBUG_KMS("failed to get link status\n"); 3414 return; 3415 } 3416 3417 /* retrieve vswing & pre-emphasis setting */ 3418 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 3419 link_status); 3420 3421 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); 3422 3423 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 3424 3425 intel_dp_phy_pattern_update(intel_dp, crtc_state); 3426 3427 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); 3428 3429 drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, 3430 intel_dp->train_set, crtc_state->lane_count); 3431 3432 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 3433 link_status[DP_DPCD_REV]); 3434 } 3435 3436 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 3437 { 3438 struct drm_dp_phy_test_params *data = 3439 &intel_dp->compliance.test_data.phytest; 3440 3441 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 3442 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 
3443 return DP_TEST_NAK; 3444 } 3445 3446 /* Set test active flag here so userspace doesn't interrupt things */ 3447 intel_dp->compliance.test_active = true; 3448 3449 return DP_TEST_ACK; 3450 } 3451 3452 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 3453 { 3454 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3455 u8 response = DP_TEST_NAK; 3456 u8 request = 0; 3457 int status; 3458 3459 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 3460 if (status <= 0) { 3461 drm_dbg_kms(&i915->drm, 3462 "Could not read test request from sink\n"); 3463 goto update_status; 3464 } 3465 3466 switch (request) { 3467 case DP_TEST_LINK_TRAINING: 3468 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 3469 response = intel_dp_autotest_link_training(intel_dp); 3470 break; 3471 case DP_TEST_LINK_VIDEO_PATTERN: 3472 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 3473 response = intel_dp_autotest_video_pattern(intel_dp); 3474 break; 3475 case DP_TEST_LINK_EDID_READ: 3476 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 3477 response = intel_dp_autotest_edid(intel_dp); 3478 break; 3479 case DP_TEST_LINK_PHY_TEST_PATTERN: 3480 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 3481 response = intel_dp_autotest_phy_pattern(intel_dp); 3482 break; 3483 default: 3484 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 3485 request); 3486 break; 3487 } 3488 3489 if (response & DP_TEST_ACK) 3490 intel_dp->compliance.test_type = request; 3491 3492 update_status: 3493 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 3494 if (status <= 0) 3495 drm_dbg_kms(&i915->drm, 3496 "Could not write test response to sink\n"); 3497 } 3498 3499 static void 3500 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) 3501 { 3502 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); 3503 3504 if (esi[1] & DP_CP_IRQ) { 3505 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 3506 *handled = 
		true;
	}
}

/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	for (;;) {
		/*
		 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
		 * pass in "esi+10" to drm_dp_channel_eq_ok(), which
		 * takes a 6-byte array. So we actually need 16 bytes
		 * here.
		 *
		 * Somebody who knows what the limits actually are
		 * should check this, but for now this is at least
		 * harmless and avoids a valid compiler warning about
		 * using more of the array than we have allocated.
		 */
		u8 esi[DP_DPRX_ESI_LEN+2] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		intel_dp_mst_hpd_irq(intel_dp, esi, &handled);

		if (!handled)
			break;

		/* Ack the serviced events back to the sink, retrying the
		 * DPCD write a few times on AUX flakiness. */
		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}

/*
 * If the PCON HDMI link dropped while we thought FRL was trained,
 * disable the HDMI link, dump the FRL error counters and restart FRL
 * training (or fall back to TMDS).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

/*
 * Check whether the main link needs retraining: returns true only when
 * the link was trained but the current DPRX link status shows CR/EQ
 * loss on the cached link parameters.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 *
	   as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

/*
 * Does @conn_state use @intel_dp as its encoder, either directly (SST)
 * or via one of its MST stream encoders?
 */
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Determine which CRTCs are driven by @intel_dp and need a link
 * retrain, taking the required CRTC locks via @ctx. On success
 * *@crtc_mask holds the mask of affected, active CRTCs (0 if no retrain
 * is needed). May return -EDEADLK, in which case the caller must back
 * off and retry.
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Re-check: the link state may have recovered while iterating */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}

/* An MST port counts as connected even without a reported connector. */
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
		intel_dp->is_mst;
}

/*
 * Retrain the DP link for @encoder if needed, suppressing FIFO underrun
 * reporting around the retrain. Caller supplies the modeset acquire
 * context; may return -EDEADLK.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * Determine which CRTCs are driven by @intel_dp for a PHY compliance
 * test, taking the required CRTC locks via @ctx. On success *@crtc_mask
 * holds the mask of affected, active CRTCs. May return -EDEADLK.
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Run a pending PHY compliance test on the (first matching) CRTC driven
 * by @encoder. May return -EDEADLK.
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}

/*
 * Run a pending PHY compliance test for @encoder, retrying on modeset
 * lock contention (-EDEADLK).
 */
void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}

/*
 * Service DEVICE_SERVICE_IRQ_VECTOR: ack it and handle automated test
 * requests, HDCP CP_IRQ and sink-specific IRQs.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * Service LINK_SERVICE_IRQ_VECTOR_ESI0: ack it and handle PCON HDMI
 * link status changes.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern \
Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: only the legacy downstream port type field */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are always connected */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* hold display-core power while querying the live-status hook */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return a freshly allocated EDID for the attached connector: a copy of
 * the cached override EDID if present (NULL if the cache marks it
 * invalid), otherwise a fresh read over DDC. Caller owns the returned
 * EDID.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

/*
 * Refresh the cached downstream facing port (DFP) capabilities - max
 * bpc, max dotclock, TMDS clock range and PCON FRL bandwidth - from the
 * branch device's DPCD and the sink's @edid.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Work out how YCbCr 4:2:0 output can be supported through the current
 * DFP - passthrough, 4:4:4->4:2:0 conversion and/or RGB->YCbCr
 * conversion - and update intel_dp->dfp and the connector's
 * ycbcr_420_allowed accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_IRONLAKE(i915))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);

	if (DISPLAY_VER(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
			/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 ||
ycbcr_420_passthrough; 4217 } 4218 } else { 4219 /* 4:4:4->4:2:0 conversion is the only way */ 4220 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; 4221 4222 connector->base.ycbcr_420_allowed = ycbcr_444_to_420; 4223 } 4224 4225 drm_dbg_kms(&i915->drm, 4226 "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", 4227 connector->base.base.id, connector->base.name, 4228 yesno(intel_dp->dfp.rgb_to_ycbcr), 4229 yesno(connector->base.ycbcr_420_allowed), 4230 yesno(intel_dp->dfp.ycbcr_444_to_420)); 4231 } 4232 4233 static void 4234 intel_dp_set_edid(struct intel_dp *intel_dp) 4235 { 4236 struct intel_connector *connector = intel_dp->attached_connector; 4237 struct edid *edid; 4238 4239 intel_dp_unset_edid(intel_dp); 4240 edid = intel_dp_get_edid(intel_dp); 4241 connector->detect_edid = edid; 4242 4243 intel_dp_update_dfp(intel_dp, edid); 4244 intel_dp_update_420(intel_dp); 4245 4246 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 4247 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 4248 intel_dp->has_audio = drm_detect_monitor_audio(edid); 4249 } 4250 4251 drm_dp_cec_set_edid(&intel_dp->aux, edid); 4252 } 4253 4254 static void 4255 intel_dp_unset_edid(struct intel_dp *intel_dp) 4256 { 4257 struct intel_connector *connector = intel_dp->attached_connector; 4258 4259 drm_dp_cec_unset_edid(&intel_dp->aux); 4260 kfree(connector->detect_edid); 4261 connector->detect_edid = NULL; 4262 4263 intel_dp->has_hdmi_sink = false; 4264 intel_dp->has_audio = false; 4265 4266 intel_dp->dfp.max_bpc = 0; 4267 intel_dp->dfp.max_dotclock = 0; 4268 intel_dp->dfp.min_tmds_clock = 0; 4269 intel_dp->dfp.max_tmds_clock = 0; 4270 4271 intel_dp->dfp.pcon_max_frl_bw = 0; 4272 4273 intel_dp->dfp.ycbcr_444_to_420 = false; 4274 connector->base.ycbcr_420_allowed = false; 4275 } 4276 4277 static int 4278 intel_dp_detect(struct drm_connector *connector, 4279 struct drm_modeset_acquire_ctx *ctx, 4280 bool force) 4281 { 4282 struct 
drm_i915_private *dev_priv = to_i915(connector->dev); 4283 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4284 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4285 struct intel_encoder *encoder = &dig_port->base; 4286 enum drm_connector_status status; 4287 4288 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 4289 connector->base.id, connector->name); 4290 drm_WARN_ON(&dev_priv->drm, 4291 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 4292 4293 if (!INTEL_DISPLAY_ENABLED(dev_priv)) 4294 return connector_status_disconnected; 4295 4296 /* Can't disconnect eDP */ 4297 if (intel_dp_is_edp(intel_dp)) 4298 status = edp_detect(intel_dp); 4299 else if (intel_digital_port_connected(encoder)) 4300 status = intel_dp_detect_dpcd(intel_dp); 4301 else 4302 status = connector_status_disconnected; 4303 4304 if (status == connector_status_disconnected) { 4305 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 4306 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4307 4308 if (intel_dp->is_mst) { 4309 drm_dbg_kms(&dev_priv->drm, 4310 "MST device may have disappeared %d vs %d\n", 4311 intel_dp->is_mst, 4312 intel_dp->mst_mgr.mst_state); 4313 intel_dp->is_mst = false; 4314 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4315 intel_dp->is_mst); 4316 } 4317 4318 goto out; 4319 } 4320 4321 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 4322 if (DISPLAY_VER(dev_priv) >= 11) 4323 intel_dp_get_dsc_sink_cap(intel_dp); 4324 4325 intel_dp_configure_mst(intel_dp); 4326 4327 /* 4328 * TODO: Reset link params when switching to MST mode, until MST 4329 * supports link training fallback params. 
4330 */ 4331 if (intel_dp->reset_link_params || intel_dp->is_mst) { 4332 /* Initial max link lane count */ 4333 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 4334 4335 /* Initial max link rate */ 4336 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 4337 4338 intel_dp->reset_link_params = false; 4339 } 4340 4341 intel_dp_print_rates(intel_dp); 4342 4343 if (intel_dp->is_mst) { 4344 /* 4345 * If we are in MST mode then this connector 4346 * won't appear connected or have anything 4347 * with EDID on it 4348 */ 4349 status = connector_status_disconnected; 4350 goto out; 4351 } 4352 4353 /* 4354 * Some external monitors do not signal loss of link synchronization 4355 * with an IRQ_HPD, so force a link status check. 4356 */ 4357 if (!intel_dp_is_edp(intel_dp)) { 4358 int ret; 4359 4360 ret = intel_dp_retrain_link(encoder, ctx); 4361 if (ret) 4362 return ret; 4363 } 4364 4365 /* 4366 * Clearing NACK and defer counts to get their exact values 4367 * while reading EDID which are required by Compliance tests 4368 * 4.2.2.4 and 4.2.2.5 4369 */ 4370 intel_dp->aux.i2c_nack_count = 0; 4371 intel_dp->aux.i2c_defer_count = 0; 4372 4373 intel_dp_set_edid(intel_dp); 4374 if (intel_dp_is_edp(intel_dp) || 4375 to_intel_connector(connector)->detect_edid) 4376 status = connector_status_connected; 4377 4378 intel_dp_check_device_service_irq(intel_dp); 4379 4380 out: 4381 if (status != connector_status_connected && !intel_dp->is_mst) 4382 intel_dp_unset_edid(intel_dp); 4383 4384 /* 4385 * Make sure the refs for power wells enabled during detect are 4386 * dropped to avoid a new detect cycle triggered by HPD polling. 
4387 */ 4388 intel_display_power_flush_work(dev_priv); 4389 4390 if (!intel_dp_is_edp(intel_dp)) 4391 drm_dp_set_subconnector_property(connector, 4392 status, 4393 intel_dp->dpcd, 4394 intel_dp->downstream_ports); 4395 return status; 4396 } 4397 4398 static void 4399 intel_dp_force(struct drm_connector *connector) 4400 { 4401 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4402 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4403 struct intel_encoder *intel_encoder = &dig_port->base; 4404 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 4405 enum intel_display_power_domain aux_domain = 4406 intel_aux_power_domain(dig_port); 4407 intel_wakeref_t wakeref; 4408 4409 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 4410 connector->base.id, connector->name); 4411 intel_dp_unset_edid(intel_dp); 4412 4413 if (connector->status != connector_status_connected) 4414 return; 4415 4416 wakeref = intel_display_power_get(dev_priv, aux_domain); 4417 4418 intel_dp_set_edid(intel_dp); 4419 4420 intel_display_power_put(dev_priv, aux_domain, wakeref); 4421 } 4422 4423 static int intel_dp_get_modes(struct drm_connector *connector) 4424 { 4425 struct intel_connector *intel_connector = to_intel_connector(connector); 4426 struct edid *edid; 4427 int num_modes = 0; 4428 4429 edid = intel_connector->detect_edid; 4430 if (edid) { 4431 num_modes = intel_connector_update_modes(connector, edid); 4432 4433 if (intel_vrr_is_capable(connector)) 4434 drm_connector_set_vrr_capable_property(connector, 4435 true); 4436 } 4437 4438 /* Also add fixed mode, which may or may not be present in EDID */ 4439 if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && 4440 intel_connector->panel.fixed_mode) { 4441 struct drm_display_mode *mode; 4442 4443 mode = drm_mode_duplicate(connector->dev, 4444 intel_connector->panel.fixed_mode); 4445 if (mode) { 4446 drm_mode_probed_add(connector, mode); 4447 num_modes++; 4448 } 4449 } 4450 4451 if 
(num_modes) 4452 return num_modes; 4453 4454 if (!edid) { 4455 struct intel_dp *intel_dp = intel_attached_dp(intel_connector); 4456 struct drm_display_mode *mode; 4457 4458 mode = drm_dp_downstream_mode(connector->dev, 4459 intel_dp->dpcd, 4460 intel_dp->downstream_ports); 4461 if (mode) { 4462 drm_mode_probed_add(connector, mode); 4463 num_modes++; 4464 } 4465 } 4466 4467 return num_modes; 4468 } 4469 4470 static int 4471 intel_dp_connector_register(struct drm_connector *connector) 4472 { 4473 struct drm_i915_private *i915 = to_i915(connector->dev); 4474 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4475 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4476 struct intel_lspcon *lspcon = &dig_port->lspcon; 4477 int ret; 4478 4479 ret = intel_connector_register(connector); 4480 if (ret) 4481 return ret; 4482 4483 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 4484 intel_dp->aux.name, connector->kdev->kobj.name); 4485 4486 intel_dp->aux.dev = connector->kdev; 4487 ret = drm_dp_aux_register(&intel_dp->aux); 4488 if (!ret) 4489 drm_dp_cec_register_connector(&intel_dp->aux, connector); 4490 4491 if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) 4492 return ret; 4493 4494 /* 4495 * ToDo: Clean this up to handle lspcon init and resume more 4496 * efficiently and streamlined. 
4497 */ 4498 if (lspcon_init(dig_port)) { 4499 lspcon_detect_hdr_capability(lspcon); 4500 if (lspcon->hdr_supported) 4501 drm_object_attach_property(&connector->base, 4502 connector->dev->mode_config.hdr_output_metadata_property, 4503 0); 4504 } 4505 4506 return ret; 4507 } 4508 4509 static void 4510 intel_dp_connector_unregister(struct drm_connector *connector) 4511 { 4512 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4513 4514 drm_dp_cec_unregister_connector(&intel_dp->aux); 4515 drm_dp_aux_unregister(&intel_dp->aux); 4516 intel_connector_unregister(connector); 4517 } 4518 4519 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 4520 { 4521 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 4522 struct intel_dp *intel_dp = &dig_port->dp; 4523 4524 intel_dp_mst_encoder_cleanup(dig_port); 4525 4526 intel_pps_vdd_off_sync(intel_dp); 4527 4528 intel_dp_aux_fini(intel_dp); 4529 } 4530 4531 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 4532 { 4533 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 4534 4535 intel_pps_vdd_off_sync(intel_dp); 4536 } 4537 4538 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) 4539 { 4540 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 4541 4542 intel_pps_wait_power_cycle(intel_dp); 4543 } 4544 4545 static int intel_modeset_tile_group(struct intel_atomic_state *state, 4546 int tile_group_id) 4547 { 4548 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4549 struct drm_connector_list_iter conn_iter; 4550 struct drm_connector *connector; 4551 int ret = 0; 4552 4553 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 4554 drm_for_each_connector_iter(connector, &conn_iter) { 4555 struct drm_connector_state *conn_state; 4556 struct intel_crtc_state *crtc_state; 4557 struct intel_crtc *crtc; 4558 4559 if (!connector->has_tile || 4560 connector->tile_group->id != tile_group_id) 4561 continue; 4562 
4563 conn_state = drm_atomic_get_connector_state(&state->base, 4564 connector); 4565 if (IS_ERR(conn_state)) { 4566 ret = PTR_ERR(conn_state); 4567 break; 4568 } 4569 4570 crtc = to_intel_crtc(conn_state->crtc); 4571 4572 if (!crtc) 4573 continue; 4574 4575 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 4576 crtc_state->uapi.mode_changed = true; 4577 4578 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 4579 if (ret) 4580 break; 4581 } 4582 drm_connector_list_iter_end(&conn_iter); 4583 4584 return ret; 4585 } 4586 4587 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 4588 { 4589 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4590 struct intel_crtc *crtc; 4591 4592 if (transcoders == 0) 4593 return 0; 4594 4595 for_each_intel_crtc(&dev_priv->drm, crtc) { 4596 struct intel_crtc_state *crtc_state; 4597 int ret; 4598 4599 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 4600 if (IS_ERR(crtc_state)) 4601 return PTR_ERR(crtc_state); 4602 4603 if (!crtc_state->hw.enable) 4604 continue; 4605 4606 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 4607 continue; 4608 4609 crtc_state->uapi.mode_changed = true; 4610 4611 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 4612 if (ret) 4613 return ret; 4614 4615 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 4616 if (ret) 4617 return ret; 4618 4619 transcoders &= ~BIT(crtc_state->cpu_transcoder); 4620 } 4621 4622 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 4623 4624 return 0; 4625 } 4626 4627 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 4628 struct drm_connector *connector) 4629 { 4630 const struct drm_connector_state *old_conn_state = 4631 drm_atomic_get_old_connector_state(&state->base, connector); 4632 const struct intel_crtc_state *old_crtc_state; 4633 struct intel_crtc *crtc; 4634 u8 transcoders; 4635 4636 crtc = to_intel_crtc(old_conn_state->crtc); 
4637 if (!crtc) 4638 return 0; 4639 4640 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 4641 4642 if (!old_crtc_state->hw.active) 4643 return 0; 4644 4645 transcoders = old_crtc_state->sync_mode_slaves_mask; 4646 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 4647 transcoders |= BIT(old_crtc_state->master_transcoder); 4648 4649 return intel_modeset_affected_transcoders(state, 4650 transcoders); 4651 } 4652 4653 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 4654 struct drm_atomic_state *_state) 4655 { 4656 struct drm_i915_private *dev_priv = to_i915(conn->dev); 4657 struct intel_atomic_state *state = to_intel_atomic_state(_state); 4658 int ret; 4659 4660 ret = intel_digital_connector_atomic_check(conn, &state->base); 4661 if (ret) 4662 return ret; 4663 4664 /* 4665 * We don't enable port sync on BDW due to missing w/as and 4666 * due to not having adjusted the modeset sequence appropriately. 4667 */ 4668 if (DISPLAY_VER(dev_priv) < 9) 4669 return 0; 4670 4671 if (!intel_connector_needs_modeset(state, conn)) 4672 return 0; 4673 4674 if (conn->has_tile) { 4675 ret = intel_modeset_tile_group(state, conn->tile_group->id); 4676 if (ret) 4677 return ret; 4678 } 4679 4680 return intel_modeset_synced_crtcs(state, conn); 4681 } 4682 4683 static void intel_dp_oob_hotplug_event(struct drm_connector *connector) 4684 { 4685 struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 4686 struct drm_i915_private *i915 = to_i915(connector->dev); 4687 4688 spin_lock_irq(&i915->irq_lock); 4689 i915->hotplug.event_bits |= BIT(encoder->hpd_pin); 4690 spin_unlock_irq(&i915->irq_lock); 4691 queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0); 4692 } 4693 4694 static const struct drm_connector_funcs intel_dp_connector_funcs = { 4695 .force = intel_dp_force, 4696 .fill_modes = drm_helper_probe_single_connector_modes, 4697 .atomic_get_property = intel_digital_connector_atomic_get_property, 4698 
.atomic_set_property = intel_digital_connector_atomic_set_property, 4699 .late_register = intel_dp_connector_register, 4700 .early_unregister = intel_dp_connector_unregister, 4701 .destroy = intel_connector_destroy, 4702 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 4703 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 4704 .oob_hotplug_event = intel_dp_oob_hotplug_event, 4705 }; 4706 4707 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 4708 .detect_ctx = intel_dp_detect, 4709 .get_modes = intel_dp_get_modes, 4710 .mode_valid = intel_dp_mode_valid, 4711 .atomic_check = intel_dp_connector_atomic_check, 4712 }; 4713 4714 enum irqreturn 4715 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 4716 { 4717 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 4718 struct intel_dp *intel_dp = &dig_port->dp; 4719 4720 if (dig_port->base.type == INTEL_OUTPUT_EDP && 4721 (long_hpd || !intel_pps_have_power(intel_dp))) { 4722 /* 4723 * vdd off can generate a long/short pulse on eDP which 4724 * would require vdd on to handle it, and thus we 4725 * would end up in an endless cycle of 4726 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 4727 */ 4728 drm_dbg_kms(&i915->drm, 4729 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 4730 long_hpd ? "long" : "short", 4731 dig_port->base.base.base.id, 4732 dig_port->base.base.name); 4733 return IRQ_HANDLED; 4734 } 4735 4736 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 4737 dig_port->base.base.base.id, 4738 dig_port->base.base.name, 4739 long_hpd ? 
"long" : "short"); 4740 4741 if (long_hpd) { 4742 intel_dp->reset_link_params = true; 4743 return IRQ_NONE; 4744 } 4745 4746 if (intel_dp->is_mst) { 4747 if (!intel_dp_check_mst_status(intel_dp)) 4748 return IRQ_NONE; 4749 } else if (!intel_dp_short_pulse(intel_dp)) { 4750 return IRQ_NONE; 4751 } 4752 4753 return IRQ_HANDLED; 4754 } 4755 4756 /* check the VBT to see whether the eDP is on another port */ 4757 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 4758 { 4759 /* 4760 * eDP not supported on g4x. so bail out early just 4761 * for a bit extra safety in case the VBT is bonkers. 4762 */ 4763 if (DISPLAY_VER(dev_priv) < 5) 4764 return false; 4765 4766 if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) 4767 return true; 4768 4769 return intel_bios_is_port_edp(dev_priv, port); 4770 } 4771 4772 static void 4773 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 4774 { 4775 struct drm_i915_private *dev_priv = to_i915(connector->dev); 4776 enum port port = dp_to_dig_port(intel_dp)->base.port; 4777 4778 if (!intel_dp_is_edp(intel_dp)) 4779 drm_connector_attach_dp_subconnector_property(connector); 4780 4781 if (!IS_G4X(dev_priv) && port != PORT_A) 4782 intel_attach_force_audio_property(connector); 4783 4784 intel_attach_broadcast_rgb_property(connector); 4785 if (HAS_GMCH(dev_priv)) 4786 drm_connector_attach_max_bpc_property(connector, 6, 10); 4787 else if (DISPLAY_VER(dev_priv) >= 5) 4788 drm_connector_attach_max_bpc_property(connector, 6, 12); 4789 4790 /* Register HDMI colorspace for case of lspcon */ 4791 if (intel_bios_is_lspcon_present(dev_priv, port)) { 4792 drm_connector_attach_content_type_property(connector); 4793 intel_attach_hdmi_colorspace_property(connector); 4794 } else { 4795 intel_attach_dp_colorspace_property(connector); 4796 } 4797 4798 if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11) 4799 drm_object_attach_property(&connector->base, 4800 
connector->dev->mode_config.hdr_output_metadata_property, 4801 0); 4802 4803 if (intel_dp_is_edp(intel_dp)) { 4804 u32 allowed_scalers; 4805 4806 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 4807 if (!HAS_GMCH(dev_priv)) 4808 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 4809 4810 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 4811 4812 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 4813 4814 } 4815 4816 if (HAS_VRR(dev_priv)) 4817 drm_connector_attach_vrr_capable_property(connector); 4818 } 4819 4820 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 4821 struct intel_connector *intel_connector) 4822 { 4823 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4824 struct drm_device *dev = &dev_priv->drm; 4825 struct drm_connector *connector = &intel_connector->base; 4826 struct drm_display_mode *fixed_mode = NULL; 4827 struct drm_display_mode *downclock_mode = NULL; 4828 bool has_dpcd; 4829 enum pipe pipe = INVALID_PIPE; 4830 struct edid *edid; 4831 4832 if (!intel_dp_is_edp(intel_dp)) 4833 return true; 4834 4835 /* 4836 * On IBX/CPT we may get here with LVDS already registered. Since the 4837 * driver uses the only internal power sequencer available for both 4838 * eDP and LVDS bail out early in this case to prevent interfering 4839 * with an already powered-on LVDS power sequencer. 4840 */ 4841 if (intel_get_lvds_encoder(dev_priv)) { 4842 drm_WARN_ON(dev, 4843 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 4844 drm_info(&dev_priv->drm, 4845 "LVDS was detected, not registering eDP\n"); 4846 4847 return false; 4848 } 4849 4850 intel_pps_init(intel_dp); 4851 4852 /* Cache DPCD and EDID for edp. 
*/ 4853 has_dpcd = intel_edp_init_dpcd(intel_dp); 4854 4855 if (!has_dpcd) { 4856 /* if this fails, presume the device is a ghost */ 4857 drm_info(&dev_priv->drm, 4858 "failed to retrieve link info, disabling eDP\n"); 4859 goto out_vdd_off; 4860 } 4861 4862 mutex_lock(&dev->mode_config.mutex); 4863 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 4864 if (edid) { 4865 if (drm_add_edid_modes(connector, edid)) { 4866 drm_connector_update_edid_property(connector, edid); 4867 } else { 4868 kfree(edid); 4869 edid = ERR_PTR(-EINVAL); 4870 } 4871 } else { 4872 edid = ERR_PTR(-ENOENT); 4873 } 4874 intel_connector->edid = edid; 4875 4876 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 4877 if (fixed_mode) 4878 downclock_mode = intel_drrs_init(intel_connector, fixed_mode); 4879 4880 /* MSO requires information from the EDID */ 4881 intel_edp_mso_init(intel_dp); 4882 4883 /* multiply the mode clock and horizontal timings for MSO */ 4884 intel_edp_mso_mode_fixup(intel_connector, fixed_mode); 4885 intel_edp_mso_mode_fixup(intel_connector, downclock_mode); 4886 4887 /* fallback to VBT if available for eDP */ 4888 if (!fixed_mode) 4889 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 4890 mutex_unlock(&dev->mode_config.mutex); 4891 4892 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4893 /* 4894 * Figure out the current pipe for the initial backlight setup. 4895 * If the current pipe isn't valid, try the PPS pipe, and if that 4896 * fails just assume pipe A. 
4897 */ 4898 pipe = vlv_active_pipe(intel_dp); 4899 4900 if (pipe != PIPE_A && pipe != PIPE_B) 4901 pipe = intel_dp->pps.pps_pipe; 4902 4903 if (pipe != PIPE_A && pipe != PIPE_B) 4904 pipe = PIPE_A; 4905 4906 drm_dbg_kms(&dev_priv->drm, 4907 "using pipe %c for initial backlight setup\n", 4908 pipe_name(pipe)); 4909 } 4910 4911 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 4912 if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) 4913 intel_connector->panel.backlight.power = intel_pps_backlight_power; 4914 intel_backlight_setup(intel_connector, pipe); 4915 4916 if (fixed_mode) { 4917 drm_connector_set_panel_orientation_with_quirk(connector, 4918 dev_priv->vbt.orientation, 4919 fixed_mode->hdisplay, fixed_mode->vdisplay); 4920 } 4921 4922 return true; 4923 4924 out_vdd_off: 4925 intel_pps_vdd_off_sync(intel_dp); 4926 4927 return false; 4928 } 4929 4930 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 4931 { 4932 struct intel_connector *intel_connector; 4933 struct drm_connector *connector; 4934 4935 intel_connector = container_of(work, typeof(*intel_connector), 4936 modeset_retry_work); 4937 connector = &intel_connector->base; 4938 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 4939 connector->name); 4940 4941 /* Grab the locks before changing connector property*/ 4942 mutex_lock(&connector->dev->mode_config.mutex); 4943 /* Set connector link status to BAD and send a Uevent to notify 4944 * userspace to do a modeset. 
4945 */ 4946 drm_connector_set_link_status_property(connector, 4947 DRM_MODE_LINK_STATUS_BAD); 4948 mutex_unlock(&connector->dev->mode_config.mutex); 4949 /* Send Hotplug uevent so userspace can reprobe */ 4950 drm_kms_helper_connector_hotplug_event(connector); 4951 } 4952 4953 bool 4954 intel_dp_init_connector(struct intel_digital_port *dig_port, 4955 struct intel_connector *intel_connector) 4956 { 4957 struct drm_connector *connector = &intel_connector->base; 4958 struct intel_dp *intel_dp = &dig_port->dp; 4959 struct intel_encoder *intel_encoder = &dig_port->base; 4960 struct drm_device *dev = intel_encoder->base.dev; 4961 struct drm_i915_private *dev_priv = to_i915(dev); 4962 enum port port = intel_encoder->port; 4963 enum phy phy = intel_port_to_phy(dev_priv, port); 4964 int type; 4965 4966 /* Initialize the work for modeset in case of link train failure */ 4967 INIT_WORK(&intel_connector->modeset_retry_work, 4968 intel_dp_modeset_retry_work_fn); 4969 4970 if (drm_WARN(dev, dig_port->max_lanes < 1, 4971 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 4972 dig_port->max_lanes, intel_encoder->base.base.id, 4973 intel_encoder->base.name)) 4974 return false; 4975 4976 intel_dp->reset_link_params = true; 4977 intel_dp->pps.pps_pipe = INVALID_PIPE; 4978 intel_dp->pps.active_pipe = INVALID_PIPE; 4979 4980 /* Preserve the current hw state. */ 4981 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 4982 intel_dp->attached_connector = intel_connector; 4983 4984 if (intel_dp_is_port_edp(dev_priv, port)) { 4985 /* 4986 * Currently we don't support eDP on TypeC ports, although in 4987 * theory it could work on TypeC legacy ports. 
4988 */ 4989 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 4990 type = DRM_MODE_CONNECTOR_eDP; 4991 intel_encoder->type = INTEL_OUTPUT_EDP; 4992 4993 /* eDP only on port B and/or C on vlv/chv */ 4994 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 4995 IS_CHERRYVIEW(dev_priv)) && 4996 port != PORT_B && port != PORT_C)) 4997 return false; 4998 } else { 4999 type = DRM_MODE_CONNECTOR_DisplayPort; 5000 } 5001 5002 intel_dp_set_source_rates(intel_dp); 5003 5004 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 5005 intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); 5006 5007 drm_dbg_kms(&dev_priv->drm, 5008 "Adding %s connector on [ENCODER:%d:%s]\n", 5009 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", 5010 intel_encoder->base.base.id, intel_encoder->base.name); 5011 5012 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 5013 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 5014 5015 if (!HAS_GMCH(dev_priv)) 5016 connector->interlace_allowed = true; 5017 connector->doublescan_allowed = 0; 5018 5019 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 5020 5021 intel_dp_aux_init(intel_dp); 5022 5023 intel_connector_attach_encoder(intel_connector, intel_encoder); 5024 5025 if (HAS_DDI(dev_priv)) 5026 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 5027 else 5028 intel_connector->get_hw_state = intel_connector_get_hw_state; 5029 5030 /* init MST on ports that can support it */ 5031 intel_dp_mst_encoder_init(dig_port, 5032 intel_connector->base.base.id); 5033 5034 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 5035 intel_dp_aux_fini(intel_dp); 5036 intel_dp_mst_encoder_cleanup(dig_port); 5037 goto fail; 5038 } 5039 5040 intel_dp_add_properties(intel_dp, connector); 5041 5042 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 5043 int ret = intel_dp_hdcp_init(dig_port, intel_connector); 5044 if (ret) 5045 drm_dbg_kms(&dev_priv->drm, 5046 "HDCP init failed, skipping.\n"); 
5047 } 5048 5049 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 5050 * 0xd. Failure to do so will result in spurious interrupts being 5051 * generated on the port when a cable is not attached. 5052 */ 5053 if (IS_G45(dev_priv)) { 5054 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 5055 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 5056 (temp & ~0xf) | 0xd); 5057 } 5058 5059 intel_dp->frl.is_trained = false; 5060 intel_dp->frl.trained_rate_gbps = 0; 5061 5062 intel_psr_init(intel_dp); 5063 5064 return true; 5065 5066 fail: 5067 drm_connector_cleanup(connector); 5068 5069 return false; 5070 } 5071 5072 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 5073 { 5074 struct intel_encoder *encoder; 5075 5076 if (!HAS_DISPLAY(dev_priv)) 5077 return; 5078 5079 for_each_intel_encoder(&dev_priv->drm, encoder) { 5080 struct intel_dp *intel_dp; 5081 5082 if (encoder->type != INTEL_OUTPUT_DDI) 5083 continue; 5084 5085 intel_dp = enc_to_intel_dp(encoder); 5086 5087 if (!intel_dp_mst_source_support(intel_dp)) 5088 continue; 5089 5090 if (intel_dp->is_mst) 5091 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 5092 } 5093 } 5094 5095 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 5096 { 5097 struct intel_encoder *encoder; 5098 5099 if (!HAS_DISPLAY(dev_priv)) 5100 return; 5101 5102 for_each_intel_encoder(&dev_priv->drm, encoder) { 5103 struct intel_dp *intel_dp; 5104 int ret; 5105 5106 if (encoder->type != INTEL_OUTPUT_DDI) 5107 continue; 5108 5109 intel_dp = enc_to_intel_dp(encoder); 5110 5111 if (!intel_dp_mst_source_support(intel_dp)) 5112 continue; 5113 5114 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 5115 true); 5116 if (ret) { 5117 intel_dp->is_mst = false; 5118 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5119 false); 5120 } 5121 } 5122 } 5123