1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "g4x_dp.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"

/* Size of the DPRX Event Status Indicator read buffer, in bytes */
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/*
 * DP DSC FEC Overhead factor = 1/(0.972261), scaled by 10^6.
 * Consumed by intel_dp_mode_to_fec_clock() to account for the FEC
 * bandwidth overhead on the link.
 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)


/* Constants for DP DSC configurations */
/* Valid DSC output bpp values, ascending; searched by intel_dp_dsc_get_output_bpp() */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 *
 * This function is not safe to use prior to encoder type being set.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);

/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->port_clock >= 1000000;
}

/* Minimal fallback: advertise only the single 162000 kHz (1.62 Gbps) rate. */
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
	intel_dp->sink_rates[0] = 162000;
	intel_dp->num_sink_rates = 1;
}

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/*
	 * Sink rates for 8b/10b.
	 */
	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	/*
	 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
	 * rates and 10 Gbps.
	 */
	if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
		u8 uhbr_rates = 0;

		/* sink_rates[] must be able to hold the 8b/10b rates plus the 3 UHBR rates */
		BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

		drm_dp_dpcd_readb(&intel_dp->aux,
				  DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

		if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
			/* We have a repeater */
			if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
			    intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
			    DP_PHY_REPEATER_128B132B_SUPPORTED) {
				/* Repeater supports 128b/132b, valid UHBR rates */
				uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
									  DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
			} else {
				/* Does not support 128b/132b */
				uhbr_rates = 0;
			}
		}

		if (uhbr_rates & DP_UHBR10)
			intel_dp->sink_rates[i++] = 1000000;
		if (uhbr_rates & DP_UHBR13_5)
			intel_dp->sink_rates[i++] = 1350000;
		if (uhbr_rates & DP_UHBR20)
			intel_dp->sink_rates[i++] = 2000000;
	}

	/* i counts both the 8b/10b and the UHBR rates appended above */
	intel_dp->num_sink_rates = i;
}

/*
 * Get length of rates array potentially limited by max_rate.
 *
 * rates[] must be sorted in ascending order; returns the number of
 * leading entries that are <= max_rate.
 */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

/*
 * The required data bandwidth for a mode with given pixel clock and bpp. This
 * is the required net bandwidth independent of the data bandwidth efficiency.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

/*
 * Given a link rate and lanes, get the data bandwidth.
 *
 * Data bandwidth is the actual payload rate, which depends on the data
 * bandwidth efficiency and the link rate.
 *
 * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
 * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
 * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
 * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
 * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
 * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
 *
 * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
 * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
 * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
 * does not match the symbol clock, the port clock (not even if you think in
 * terms of a byte clock), nor the data bandwidth. It only matches the link bit
 * rate in units of 10000 bps.
 */
int
intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
	if (max_link_rate >= 1000000) {
		/*
		 * UHBR rates always use 128b/132b channel encoding, and have
		 * 96.71% data bandwidth efficiency. Consider max_link_rate the
		 * link bit rate in units of 10000 bps.
		 */
		int max_link_rate_kbps = max_link_rate * 10;

		max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
		max_link_rate = max_link_rate_kbps / 8;
	}

	/*
	 * Lower than UHBR rates always use 8b/10b channel encoding, and have
	 * 80% data bandwidth efficiency for SST non-FEC.
	 * However, this turns
	 * out to be a nop by coincidence, and can be skipped:
	 *
	 *	int max_link_rate_kbps = max_link_rate * 10;
	 *	max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
	 *	max_link_rate = max_link_rate_kbps / 8;
	 */

	return max_link_rate * max_lanes;
}

/* Bigjoiner is available on display ver 12+, and on ver 11 on any port except A. */
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	return DISPLAY_VER(dev_priv) >= 12 ||
		(DISPLAY_VER(dev_priv) == 11 &&
		 encoder->port != PORT_A);
}

/* Max DG2 source rate in kHz: 810000 for eDP, 1350000 (UHBR13.5) for DP */
static int dg2_max_source_rate(struct intel_dp *intel_dp)
{
	return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}

/* Read the PHY comp voltage info and check for the 0.85V SKU */
static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
{
	u32 voltage;

	voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;

	return voltage == VOLTAGE_INFO_0_85V;
}

/* Max ICL source rate in kHz, clamped to 540000 on low-voltage combo PHYs (non-eDP) */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
		return 540000;

	return 810000;
}

/* Max JSL/EHL source rate in kHz, clamped to 540000 for eDP and low-voltage SKUs */
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
		return 540000;

	return 810000;
}

/* Max DG1 source rate in kHz, clamped to 540000 on low-voltage combo PHYs */
static int dg1_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

	if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
		return 540000;

	return 810000;
}

/*
 * Pick the platform's source rate table and clamp it by the platform max
 * rate and the VBT max rate; result is stored in intel_dp->source_rates.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int icl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
		1000000, 1350000,
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (DISPLAY_VER(dev_priv) >= 11) {
		source_rates = icl_rates;
		size = ARRAY_SIZE(icl_rates);
		if (IS_DG2(dev_priv))
			max_rate = dg2_max_source_rate(intel_dp);
		else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
			 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
			max_rate = dg1_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Clamp further by the VBT-provided max link rate, if any */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

/*
 * Merge-intersect two ascending rate arrays into common_rates;
 * returns the number of common entries (at most DP_MAX_SUPPORTED_RATES).
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common.
 */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

/* Check the requested link params against the current fallback max rate/lane limits */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

/* Can the fixed eDP mode still be driven at the given link rate/lane count? */
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	/* 18 bpp = minimum RGB bpp (6 bpc * 3, see intel_dp_min_bpp()) */
	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

/*
 * Reduce the link params for the next link training attempt: first drop to
 * the next lower common rate, then halve the lane count. Returns 0 if a
 * fallback was applied (or a retry with same/max params was requested),
 * -1 if no fallback is left.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

/* Scale mode_clock by the FEC overhead, 1/0.972261 (DP_DSC_FEC_OVERHEAD_FACTOR) */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

/* Per-platform small joiner RAM size, in bits */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 13)
		return 17280 * 8;
	else if (DISPLAY_VER(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

/*
 * Compute the maximum DSC compressed bpp for the given link and mode.
 * Returns the bpp in U6.4 fixed point format, or 0 if no supported bpp
 * can be found.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner,
				       u32 pipe_bpp)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;

	if (bigjoiner)
		max_bpp_small_joiner_ram *= 2;

	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	if (bigjoiner) {
		u32 max_bpp_bigjoiner =
			i915->max_cdclk_freq * 48 /
			intel_dp_mode_to_fec_clock(mode_clock);

		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
	}

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
	if (DISPLAY_VER(i915) >= 13) {
		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
	} else {
		/* Find the nearest match in the array of known BPPs from VESA */
		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
			if (bits_per_pixel < valid_dsc_bpp[i + 1])
				break;
		}
		bits_per_pixel = valid_dsc_bpp[i];
	}

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

/*
 * Return the smallest supported slice count that satisfies both the
 * encoder throughput and the sink's max slice width, or 0 if none fits.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay,
				       bool bigjoiner)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Minimum slices needed to stay within per-slice encoder throughput */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* With bigjoiner each pipe contributes, doubling the slice count */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	/* Prefer RGB output when the DFP can do the RGB->4:4:4->4:2:0 conversion */
	if (intel_dp->dfp.rgb_to_ycbcr &&
	    intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

/* Minimum bpp for the output format: 6 bpc for RGB, 8 bpc otherwise */
int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
	 * format of the number of bytes per pixel will be half the number
	 * of bytes of RGB pixel.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Minimum output bpp for the mode, accounting for the chosen output format */
static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

/* Validate the mode against the downstream facing port (PCON/DP++) limits */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
				    int hdisplay, int clock)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_can_bigjoiner(intel_dp))
		return false;

	return clock > i915->max_dotclk_freq || hdisplay > 5120;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	/* eDP is fixed to the panel mode; validate against it instead */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		status = intel_panel_mode_valid(intel_connector, mode);
		if (status != MODE_OK)
			return status;

		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Bigjoiner doubles the available dotclock budget */
	if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
901 */ 902 if (DISPLAY_VER(dev_priv) >= 10 && 903 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { 904 /* 905 * TBD pass the connector BPC, 906 * for now U8_MAX so that max BPC on that platform would be picked 907 */ 908 int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX); 909 910 if (intel_dp_is_edp(intel_dp)) { 911 dsc_max_output_bpp = 912 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; 913 dsc_slice_count = 914 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 915 true); 916 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { 917 dsc_max_output_bpp = 918 intel_dp_dsc_get_output_bpp(dev_priv, 919 max_link_clock, 920 max_lanes, 921 target_clock, 922 mode->hdisplay, 923 bigjoiner, 924 pipe_bpp) >> 4; 925 dsc_slice_count = 926 intel_dp_dsc_get_slice_count(intel_dp, 927 target_clock, 928 mode->hdisplay, 929 bigjoiner); 930 } 931 932 dsc = dsc_max_output_bpp && dsc_slice_count; 933 } 934 935 /* 936 * Big joiner configuration needs DSC for TGL which is not true for 937 * XE_LPD where uncompressed joiner is supported. 938 */ 939 if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) 940 return MODE_CLOCK_HIGH; 941 942 if (mode_rate > max_rate && !dsc) 943 return MODE_CLOCK_HIGH; 944 945 status = intel_dp_mode_valid_downstream(intel_connector, 946 mode, target_clock); 947 if (status != MODE_OK) 948 return status; 949 950 return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); 951 } 952 953 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) 954 { 955 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); 956 } 957 958 bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) 959 { 960 return DISPLAY_VER(i915) >= 10; 961 } 962 963 static void snprintf_int_array(char *str, size_t len, 964 const int *array, int nelem) 965 { 966 int i; 967 968 str[0] = '\0'; 969 970 for (i = 0; i < nelem; i++) { 971 int r = snprintf(str, len, "%s%d", i ? 
", " : "", array[i]); 972 if (r >= len) 973 return; 974 str += r; 975 len -= r; 976 } 977 } 978 979 static void intel_dp_print_rates(struct intel_dp *intel_dp) 980 { 981 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 982 char str[128]; /* FIXME: too big for stack? */ 983 984 if (!drm_debug_enabled(DRM_UT_KMS)) 985 return; 986 987 snprintf_int_array(str, sizeof(str), 988 intel_dp->source_rates, intel_dp->num_source_rates); 989 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 990 991 snprintf_int_array(str, sizeof(str), 992 intel_dp->sink_rates, intel_dp->num_sink_rates); 993 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 994 995 snprintf_int_array(str, sizeof(str), 996 intel_dp->common_rates, intel_dp->num_common_rates); 997 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 998 } 999 1000 int 1001 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1002 { 1003 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1004 int len; 1005 1006 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1007 if (drm_WARN_ON(&i915->drm, len <= 0)) 1008 return 162000; 1009 1010 return intel_dp->common_rates[len - 1]; 1011 } 1012 1013 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1014 { 1015 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1016 int i = intel_dp_rate_index(intel_dp->sink_rates, 1017 intel_dp->num_sink_rates, rate); 1018 1019 if (drm_WARN_ON(&i915->drm, i < 0)) 1020 i = 0; 1021 1022 return i; 1023 } 1024 1025 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1026 u8 *link_bw, u8 *rate_select) 1027 { 1028 /* eDP 1.4 rate select method. 
*/ 1029 if (intel_dp->use_rate_select) { 1030 *link_bw = 0; 1031 *rate_select = 1032 intel_dp_rate_select(intel_dp, port_clock); 1033 } else { 1034 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1035 *rate_select = 0; 1036 } 1037 } 1038 1039 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1040 const struct intel_crtc_state *pipe_config) 1041 { 1042 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1043 1044 /* On TGL, FEC is supported on all Pipes */ 1045 if (DISPLAY_VER(dev_priv) >= 12) 1046 return true; 1047 1048 if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A) 1049 return true; 1050 1051 return false; 1052 } 1053 1054 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1055 const struct intel_crtc_state *pipe_config) 1056 { 1057 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1058 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1059 } 1060 1061 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1062 const struct intel_crtc_state *crtc_state) 1063 { 1064 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) 1065 return false; 1066 1067 return intel_dsc_source_support(crtc_state) && 1068 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1069 } 1070 1071 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, 1072 const struct intel_crtc_state *crtc_state) 1073 { 1074 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 1075 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 1076 intel_dp->dfp.ycbcr_444_to_420); 1077 } 1078 1079 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, 1080 const struct intel_crtc_state *crtc_state, int bpc) 1081 { 1082 int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; 1083 1084 if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) 1085 clock /= 2; 1086 1087 return clock; 1088 } 1089 1090 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, 1091 const 
struct intel_crtc_state *crtc_state, int bpc)
{
	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);

	/* A zero min/max means the DFP advertised no such limit */
	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return false;

	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return false;

	return true;
}

/* Can the DP->HDMI path do deep color at the given bpc for this state? */
static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
					      const struct intel_crtc_state *crtc_state,
					      int bpc)
{

	return intel_hdmi_deep_color_possible(crtc_state, bpc,
					      intel_dp->has_hdmi_sink,
					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}

/*
 * Max link bpp for the pipe, clamped by the DFP's bpc/TMDS clock limits
 * and, for eDP panels without EDID bpc, by a VBT-provided value.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		/* Step down deep color modes until the TMDS clock fits */
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests.
 */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		/* 6 bpc (18 bpp) test requires dithering to be forced off */
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_rate = limits->max_rate =
					intel_dp->compliance.test_link_rate;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, i, lane_count;
	int mode_rate, link_rate, link_avail;

	/* Try highest bpp first, stepping down 2 bpc (6 bpp) per iteration */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (i = 0; i < intel_dp->num_common_rates; i++) {
			link_rate = intel_dp->common_rates[i];
			if (link_rate < limits->min_rate ||
			    link_rate > limits->max_rate)
				continue;

			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_avail = intel_dp_max_data_rate(link_rate,
								    lane_count);

				/* First config with enough bandwidth wins */
				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_rate;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

/*
 * Pick the DSC input pipe bpp: the first sink-supported input bpc not
 * exceeding both the requested max and the platform limit. Returns 0
 * when nothing fits.
 */
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};
	u8 dsc_max_bpc;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (DISPLAY_VER(i915) >= 12)
		dsc_max_bpc = min_t(u8, 12, max_req_bpc);
	else
		dsc_max_bpc = min_t(u8, 10, max_req_bpc);

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	/*
	 * Take the first entry that fits; presumably the helper fills
	 * dsc_bpc[] highest-first -- verify against the drm helper.
	 */
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

/* Highest DSC minor version we implement on the source side */
#define DSC_SUPPORTED_VERSION_MIN	1

/*
 * Fill crtc_state->dsc.config from source parameters and the sink's
 * DSC DPCD capabilities.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/* DSC version: major from the sink, minor capped at what we support */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* DSC 1.2 encodes the max line buffer depth with a special value */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}

/*
 * Compute a DSC-enabled link configuration, currently always at max
 * link rate/lane count. Returns 0 on success, -EINVAL when DSC cannot
 * be used for this state.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	int pipe_bpp;
	int ret;

	/* FEC goes together with DSC on DP (but not eDP) links */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = limits->max_rate;
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* Sink value is presumably in 1/16 bpp units, hence the >> 4 */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner,
						    pipe_bpp);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}

	/* As of today we support DSC for only RGB */
	if (intel_dp->force_dsc_bpp) {
		/* Only honor a forced bpp within the sane [8, pipe_bpp) range */
		if (intel_dp->force_dsc_bpp >= 8 &&
		    intel_dp->force_dsc_bpp < pipe_bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "DSC BPP forced to %d",
				    intel_dp->force_dsc_bpp);
			pipe_config->dsc.compressed_bpp =
				intel_dp->force_dsc_bpp;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Invalid DSC BPP %d",
				    intel_dp->force_dsc_bpp);
		}
	}

	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}

/*
 * Compute lane count, link rate and bpp for the pipe, falling back to
 * (or forcing) DSC when needed. Returns 0 on success, negative errno
 * when no valid config exists.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	limits.min_rate = intel_dp->common_rates[0];
	limits.max_rate = intel_dp->common_rates[common_len - 1];

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if
(intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_rate = limits.max_rate;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count, limits.max_rate,
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/* Wide/fast modes may need two pipes (bigjoiner) to drive the port */
	if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
				    adjusted_mode->crtc_clock))
		pipe_config->bigjoiner = true;

	/*
	 * Optimize for slow and wide for everything, because there are some
	 * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/*
	 * Pipe joiner needs compression up to display 12 due to BW limitation.
	 * DG2 onwards pipe joiner can be enabled without compression.
	 */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 &&
					      pipe_config->bigjoiner)) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}

/*
 * Decide whether an RGB output should use limited quantization range
 * for this state; always false for YCbCr formats.
 */
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

/* Can this port carry audio on this platform? */
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

/* Fill the pixel format / colorimetry fields of a rev 0x5 VSC SDP. */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector's colorspace property to DP colorimetry */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

/* Build the VSC SDP infoframe for a (non-PSR) crtc state. */
static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;

	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;
	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
					 &crtc_state->infoframes.vsc);
}

/* Build the VSC SDP used by the PSR code, depending on PSR1/PSR2. */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	vsc->sdp_type = DP_SDP_VSC;

	if (crtc_state->has_psr2) {
		if (intel_dp->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

/* Build the HDR metadata (DRM) infoframe SDP if the property is set. */
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/* Encoder ->compute_config() hook: compute the full DP pipe config. */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		ret = intel_panel_compute_config(intel_connector, adjusted_mode);
		if (ret)
			return ret;

		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Reject mode flags we can't support on DP */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Each MSO segment carries 1/n of the h timings plus the overlap */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	intel_vrr_compute_config(pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				  constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}

/* Reset link-training state and latch the new link rate/lane count. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}

/* Re-derive the max link params from the current common rates/lanes. */
static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
{
	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}

/* Enable backlight PWM and backlight PP control.
 */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Only eDP has a panel backlight to drive */
	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_backlight_enable(crtc_state, conn_state);
	intel_pps_backlight_on(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* Reverse order wrt. intel_edp_backlight_on() */
	intel_pps_backlight_off(intel_dp);
	intel_backlight_disable(old_conn_state);
}

static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		drm_dp_is_branch(intel_dp->dpcd) &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

/* Tell a DSC sink to enable/disable decompression via DPCD. */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ?
DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enabledisable(enable));
}

/* Program (or verify) our source OUI in the eDP sink's DPCD. */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = { 0 };

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		/* buf stays zeroed on a failed read, so we fall through and rewrite */
		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");
}

/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink in D0 when its downstream hpd needs it */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (!crtc_state)
		return;

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		intel_dp_get_dpcd(intel_dp);

	intel_dp_reset_max_link_params(intel_dp);
}

/*
 * Check whether the BIOS-programmed state is usable for fastset;
 * returns false (forcing a full modeset) when it isn't.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
2044 */ 2045 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates, 2046 crtc_state->port_clock) < 0) { 2047 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n"); 2048 crtc_state->uapi.connectors_changed = true; 2049 return false; 2050 } 2051 2052 /* 2053 * FIXME hack to force full modeset when DSC is being used. 2054 * 2055 * As long as we do not have full state readout and config comparison 2056 * of crtc_state->dsc, we have no way to ensure reliable fastset. 2057 * Remove once we have readout for DSC. 2058 */ 2059 if (crtc_state->dsc.compression_enable) { 2060 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n"); 2061 crtc_state->uapi.mode_changed = true; 2062 return false; 2063 } 2064 2065 if (CAN_PSR(intel_dp)) { 2066 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n"); 2067 crtc_state->uapi.mode_changed = true; 2068 return false; 2069 } 2070 2071 return true; 2072 } 2073 2074 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) 2075 { 2076 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2077 2078 /* Clear the cached register set to avoid using stale values */ 2079 2080 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); 2081 2082 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, 2083 intel_dp->pcon_dsc_dpcd, 2084 sizeof(intel_dp->pcon_dsc_dpcd)) < 0) 2085 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", 2086 DP_PCON_DSC_ENCODER); 2087 2088 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", 2089 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); 2090 } 2091 2092 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) 2093 { 2094 int bw_gbps[] = {9, 18, 24, 32, 40, 48}; 2095 int i; 2096 2097 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { 2098 if (frl_bw_mask & (1 << i)) 2099 return bw_gbps[i]; 2100 } 2101 return 0; 2102 } 2103 2104 static int intel_dp_pcon_set_frl_mask(int max_frl) 2105 { 2106 switch 
(max_frl) { 2107 case 48: 2108 return DP_PCON_FRL_BW_MASK_48GBPS; 2109 case 40: 2110 return DP_PCON_FRL_BW_MASK_40GBPS; 2111 case 32: 2112 return DP_PCON_FRL_BW_MASK_32GBPS; 2113 case 24: 2114 return DP_PCON_FRL_BW_MASK_24GBPS; 2115 case 18: 2116 return DP_PCON_FRL_BW_MASK_18GBPS; 2117 case 9: 2118 return DP_PCON_FRL_BW_MASK_9GBPS; 2119 } 2120 2121 return 0; 2122 } 2123 2124 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 2125 { 2126 struct intel_connector *intel_connector = intel_dp->attached_connector; 2127 struct drm_connector *connector = &intel_connector->base; 2128 int max_frl_rate; 2129 int max_lanes, rate_per_lane; 2130 int max_dsc_lanes, dsc_rate_per_lane; 2131 2132 max_lanes = connector->display_info.hdmi.max_lanes; 2133 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; 2134 max_frl_rate = max_lanes * rate_per_lane; 2135 2136 if (connector->display_info.hdmi.dsc_cap.v_1p2) { 2137 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; 2138 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; 2139 if (max_dsc_lanes && dsc_rate_per_lane) 2140 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); 2141 } 2142 2143 return max_frl_rate; 2144 } 2145 2146 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) 2147 { 2148 #define TIMEOUT_FRL_READY_MS 500 2149 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 2150 2151 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2152 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; 2153 u8 max_frl_bw_mask = 0, frl_trained_mask; 2154 bool is_active; 2155 2156 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2157 if (ret < 0) 2158 return ret; 2159 2160 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 2161 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); 2162 2163 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); 2164 drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw); 2165 
	/* Train at the lower of what the PCON and the sink EDID support */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Verify HDMI Link configuration shows FRL Mode */
	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
	    DP_PCON_HDMI_MODE_FRL) {
		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
		return -EINVAL;
	}
	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);

	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/* An HDMI 2.1 sink sits behind a branch device and advertises FRL support. */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp->has_hdmi_sink &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Always go for FRL training if:
	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
	 * -sink is HDMI2.1
	 */
	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		/* FRL training failed: fall back to (and verify) TMDS mode */
		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}

/* DSC slice height for the PCON encoder, based on the active vertical size. */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
{
	int vactive = crtc_state->hw.adjusted_mode.vdisplay;

	return intel_hdmi_dsc_get_slice_height(vactive);
}

/* Number of DSC slices, limited by both PCON and HDMI sink capabilities. */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
					     pcon_max_slice_width,
					     hdmi_max_slices, hdmi_throughput);
}

/* DSC bits-per-pixel for the PCON encoder, honouring sink bpp constraints. */
static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  int num_slices, int slice_width)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int output_format = crtc_state->output_format;
	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
	int hdmi_max_chunk_bytes =
		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
				      num_slices, output_format, hdmi_all_bpp,
				      hdmi_max_chunk_bytes);
}

void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON encoder and the HDMI sink must support DSC 1.2 */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack the PPS override: slice height/width (16 bit), bpp (10 bit) */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}

void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
			    enabledisable(intel_dp->has_hdmi_sink));

	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
			    enabledisable(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;
	if (intel_dp->dfp.rgb_to_ycbcr) {
		bool bt2020, bt709;

		/*
		 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
		 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
		 *
		 */
		tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;

		bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								   intel_dp->downstream_ports,
								   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
		bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								  intel_dp->downstream_ports,
								  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
		switch (crtc_state->infoframes.vsc.colorimetry) {
		case DP_COLORIMETRY_BT2020_RGB:
		case DP_COLORIMETRY_BT2020_YCC:
			if (bt2020)
				tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
			break;
		case DP_COLORIMETRY_BT709_YCC:
		case DP_COLORIMETRY_XVYCC_709:
			if (bt709)
				tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
			break;
		default:
			break;
		}
	}

	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
			    enabledisable(tmp));
}

/* Check whether the sink supports the VSC SDP extension for colorimetry. */
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

/* Cache the sink's DSC and FEC capability DPCD registers. */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}

/*
 * Expand a single-segment mode to the full panel size for eDP multi-SST
 * operation (MSO): the EDID describes one segment, the panel is n segments
 * wide with an optional pixel overlap between adjacent segments.
 */
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
				     struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int n = intel_dp->mso_link_count;
	int overlap = intel_dp->mso_pixel_overlap;

	if (!mode || !n)
		return;

	mode->hdisplay = (mode->hdisplay - overlap) * n;
	mode->hsync_start = (mode->hsync_start - overlap) * n;
	mode->hsync_end = (mode->hsync_end - overlap) * n;
	mode->htotal = (mode->htotal - overlap) * n;
	mode->clock *= n;

	drm_mode_set_name(mode);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] using generated MSO mode: ",
		    connector->base.base.id, connector->base.name);
	drm_mode_debug_printmodeline(mode);
}

/* Read and validate the sink's eDP MSO (multi-SST operation) capability. */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}

static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_max_link_params(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (DISPLAY_VER(dev_priv) >= 10)
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}

/* Whether the sink provides a valid SINK_COUNT we should consult. */
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

/*
 * (Re)read the sink's DPCD caps, descriptor, sink count and downstream
 * port info. Returns false if the sink looks absent or unusable.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

/* MST is possible when the modparam, the source and the sink all allow it. */
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp_mst_source_support(intel_dp) &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

/* Enable/disable the MST topology manager to match current capabilities. */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp_mst_source_support(intel_dp))
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

/* Read the full ESI (event status indicator) block from the sink. */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Pack a drm_dp_vsc_sdp into SDP wire format.
 * Returns the packed length, or -ENOSPC if @size is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

/*
 * Pack an HDR static metadata (DRM) infoframe into DP SDP wire format.
 * Returns the number of bytes to hand to write_infoframe(), or -ENOSPC.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

/* Pack and write one SDP type (VSC or HDR GMP) for the given CRTC state. */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (!crtc_state->has_psr)
		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	if (!enable)
		return;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!crtc_state->has_psr)
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/* Unpack a VSC SDP read back from the hardware; returns 0 or -EINVAL. */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

/* Unpack an HDR static metadata infoframe SDP; returns 0 or -EINVAL. */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count - 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

/* Read back and unpack the VSC SDP for state readout. */
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

/* Read back and unpack the HDR metadata infoframe SDP for state readout. */
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

/* DP CTS 1.2, 4.3.1.11: handle a link training compliance test request. */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

static u8
intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 3193 { 3194 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3195 u8 test_pattern; 3196 u8 test_misc; 3197 __be16 h_width, v_height; 3198 int status = 0; 3199 3200 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 3201 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 3202 &test_pattern); 3203 if (status <= 0) { 3204 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 3205 return DP_TEST_NAK; 3206 } 3207 if (test_pattern != DP_COLOR_RAMP) 3208 return DP_TEST_NAK; 3209 3210 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 3211 &h_width, 2); 3212 if (status <= 0) { 3213 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 3214 return DP_TEST_NAK; 3215 } 3216 3217 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 3218 &v_height, 2); 3219 if (status <= 0) { 3220 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 3221 return DP_TEST_NAK; 3222 } 3223 3224 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 3225 &test_misc); 3226 if (status <= 0) { 3227 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 3228 return DP_TEST_NAK; 3229 } 3230 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 3231 return DP_TEST_NAK; 3232 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 3233 return DP_TEST_NAK; 3234 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 3235 case DP_TEST_BIT_DEPTH_6: 3236 intel_dp->compliance.test_data.bpc = 6; 3237 break; 3238 case DP_TEST_BIT_DEPTH_8: 3239 intel_dp->compliance.test_data.bpc = 8; 3240 break; 3241 default: 3242 return DP_TEST_NAK; 3243 } 3244 3245 intel_dp->compliance.test_data.video_pattern = test_pattern; 3246 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 3247 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 3248 /* Set test active flag here so userspace doesn't interrupt things */ 3249 intel_dp->compliance.test_active = true; 3250 3251 return DP_TEST_ACK; 3252 } 3253 3254 static u8 
intel_dp_autotest_edid(struct intel_dp *intel_dp) 3255 { 3256 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3257 u8 test_result = DP_TEST_ACK; 3258 struct intel_connector *intel_connector = intel_dp->attached_connector; 3259 struct drm_connector *connector = &intel_connector->base; 3260 3261 if (intel_connector->detect_edid == NULL || 3262 connector->edid_corrupt || 3263 intel_dp->aux.i2c_defer_count > 6) { 3264 /* Check EDID read for NACKs, DEFERs and corruption 3265 * (DP CTS 1.2 Core r1.1) 3266 * 4.2.2.4 : Failed EDID read, I2C_NAK 3267 * 4.2.2.5 : Failed EDID read, I2C_DEFER 3268 * 4.2.2.6 : EDID corruption detected 3269 * Use failsafe mode for all cases 3270 */ 3271 if (intel_dp->aux.i2c_nack_count > 0 || 3272 intel_dp->aux.i2c_defer_count > 0) 3273 drm_dbg_kms(&i915->drm, 3274 "EDID read had %d NACKs, %d DEFERs\n", 3275 intel_dp->aux.i2c_nack_count, 3276 intel_dp->aux.i2c_defer_count); 3277 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 3278 } else { 3279 struct edid *block = intel_connector->detect_edid; 3280 3281 /* We have to write the checksum 3282 * of the last block read 3283 */ 3284 block += intel_connector->detect_edid->extensions; 3285 3286 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 3287 block->checksum) <= 0) 3288 drm_dbg_kms(&i915->drm, 3289 "Failed to write EDID checksum\n"); 3290 3291 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 3292 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 3293 } 3294 3295 /* Set test active flag here so userspace doesn't interrupt things */ 3296 intel_dp->compliance.test_active = true; 3297 3298 return test_result; 3299 } 3300 3301 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 3302 const struct intel_crtc_state *crtc_state) 3303 { 3304 struct drm_i915_private *dev_priv = 3305 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 3306 struct drm_dp_phy_test_params *data = 3307 &intel_dp->compliance.test_data.phytest; 3308 
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3309 enum pipe pipe = crtc->pipe; 3310 u32 pattern_val; 3311 3312 switch (data->phy_pattern) { 3313 case DP_PHY_TEST_PATTERN_NONE: 3314 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 3315 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 3316 break; 3317 case DP_PHY_TEST_PATTERN_D10_2: 3318 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 3319 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3320 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 3321 break; 3322 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 3323 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 3324 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3325 DDI_DP_COMP_CTL_ENABLE | 3326 DDI_DP_COMP_CTL_SCRAMBLED_0); 3327 break; 3328 case DP_PHY_TEST_PATTERN_PRBS7: 3329 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 3330 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3331 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 3332 break; 3333 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 3334 /* 3335 * FIXME: Ideally pattern should come from DPCD 0x250. As 3336 * current firmware of DPR-100 could not set it, so hardcoding 3337 * now for complaince test. 3338 */ 3339 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 3340 pattern_val = 0x3e0f83e0; 3341 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 3342 pattern_val = 0x0f83e0f8; 3343 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 3344 pattern_val = 0x0000f83e; 3345 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 3346 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3347 DDI_DP_COMP_CTL_ENABLE | 3348 DDI_DP_COMP_CTL_CUSTOM80); 3349 break; 3350 case DP_PHY_TEST_PATTERN_CP2520: 3351 /* 3352 * FIXME: Ideally pattern should come from DPCD 0x24A. As 3353 * current firmware of DPR-100 could not set it, so hardcoding 3354 * now for complaince test. 
3355 */ 3356 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 3357 pattern_val = 0xFB; 3358 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3359 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 3360 pattern_val); 3361 break; 3362 default: 3363 WARN(1, "Invalid Phy Test Pattern\n"); 3364 } 3365 } 3366 3367 static void 3368 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, 3369 const struct intel_crtc_state *crtc_state) 3370 { 3371 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3372 struct drm_device *dev = dig_port->base.base.dev; 3373 struct drm_i915_private *dev_priv = to_i915(dev); 3374 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3375 enum pipe pipe = crtc->pipe; 3376 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3377 3378 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3379 TRANS_DDI_FUNC_CTL(pipe)); 3380 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3381 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3382 3383 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 3384 TGL_TRANS_DDI_PORT_MASK); 3385 trans_conf_value &= ~PIPECONF_ENABLE; 3386 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 3387 3388 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3389 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3390 trans_ddi_func_ctl_value); 3391 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3392 } 3393 3394 static void 3395 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, 3396 const struct intel_crtc_state *crtc_state) 3397 { 3398 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3399 struct drm_device *dev = dig_port->base.base.dev; 3400 struct drm_i915_private *dev_priv = to_i915(dev); 3401 enum port port = dig_port->base.port; 3402 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3403 enum pipe pipe = crtc->pipe; 3404 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3405 3406 
trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3407 TRANS_DDI_FUNC_CTL(pipe)); 3408 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3409 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3410 3411 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 3412 TGL_TRANS_DDI_SELECT_PORT(port); 3413 trans_conf_value |= PIPECONF_ENABLE; 3414 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 3415 3416 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3417 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3418 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3419 trans_ddi_func_ctl_value); 3420 } 3421 3422 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 3423 const struct intel_crtc_state *crtc_state) 3424 { 3425 struct drm_dp_phy_test_params *data = 3426 &intel_dp->compliance.test_data.phytest; 3427 u8 link_status[DP_LINK_STATUS_SIZE]; 3428 3429 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 3430 link_status) < 0) { 3431 DRM_DEBUG_KMS("failed to get link status\n"); 3432 return; 3433 } 3434 3435 /* retrieve vswing & pre-emphasis setting */ 3436 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 3437 link_status); 3438 3439 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); 3440 3441 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 3442 3443 intel_dp_phy_pattern_update(intel_dp, crtc_state); 3444 3445 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); 3446 3447 drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, 3448 intel_dp->train_set, crtc_state->lane_count); 3449 3450 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 3451 link_status[DP_DPCD_REV]); 3452 } 3453 3454 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 3455 { 3456 struct drm_dp_phy_test_params *data = 3457 &intel_dp->compliance.test_data.phytest; 3458 3459 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 3460 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 
3461 return DP_TEST_NAK; 3462 } 3463 3464 /* Set test active flag here so userspace doesn't interrupt things */ 3465 intel_dp->compliance.test_active = true; 3466 3467 return DP_TEST_ACK; 3468 } 3469 3470 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 3471 { 3472 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3473 u8 response = DP_TEST_NAK; 3474 u8 request = 0; 3475 int status; 3476 3477 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 3478 if (status <= 0) { 3479 drm_dbg_kms(&i915->drm, 3480 "Could not read test request from sink\n"); 3481 goto update_status; 3482 } 3483 3484 switch (request) { 3485 case DP_TEST_LINK_TRAINING: 3486 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 3487 response = intel_dp_autotest_link_training(intel_dp); 3488 break; 3489 case DP_TEST_LINK_VIDEO_PATTERN: 3490 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 3491 response = intel_dp_autotest_video_pattern(intel_dp); 3492 break; 3493 case DP_TEST_LINK_EDID_READ: 3494 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 3495 response = intel_dp_autotest_edid(intel_dp); 3496 break; 3497 case DP_TEST_LINK_PHY_TEST_PATTERN: 3498 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 3499 response = intel_dp_autotest_phy_pattern(intel_dp); 3500 break; 3501 default: 3502 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 3503 request); 3504 break; 3505 } 3506 3507 if (response & DP_TEST_ACK) 3508 intel_dp->compliance.test_type = request; 3509 3510 update_status: 3511 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 3512 if (status <= 0) 3513 drm_dbg_kms(&i915->drm, 3514 "Could not write test response to sink\n"); 3515 } 3516 3517 static void 3518 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) 3519 { 3520 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); 3521 3522 if (esi[1] & DP_CP_IRQ) { 3523 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 3524 *handled = 
true; 3525 } 3526 } 3527 3528 /** 3529 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 3530 * @intel_dp: Intel DP struct 3531 * 3532 * Read any pending MST interrupts, call MST core to handle these and ack the 3533 * interrupts. Check if the main and AUX link state is ok. 3534 * 3535 * Returns: 3536 * - %true if pending interrupts were serviced (or no interrupts were 3537 * pending) w/o detecting an error condition. 3538 * - %false if an error condition - like AUX failure or a loss of link - is 3539 * detected, which needs servicing from the hotplug work. 3540 */ 3541 static bool 3542 intel_dp_check_mst_status(struct intel_dp *intel_dp) 3543 { 3544 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3545 bool link_ok = true; 3546 3547 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 3548 3549 for (;;) { 3550 /* 3551 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then 3552 * pass in "esi+10" to drm_dp_channel_eq_ok(), which 3553 * takes a 6-byte array. So we actually need 16 bytes 3554 * here. 3555 * 3556 * Somebody who knows what the limits actually are 3557 * should check this, but for now this is at least 3558 * harmless and avoids a valid compiler warning about 3559 * using more of the array than we have allocated. 
3560 */ 3561 u8 esi[DP_DPRX_ESI_LEN+2] = {}; 3562 bool handled; 3563 int retry; 3564 3565 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 3566 drm_dbg_kms(&i915->drm, 3567 "failed to get ESI - device may have failed\n"); 3568 link_ok = false; 3569 3570 break; 3571 } 3572 3573 /* check link status - esi[10] = 0x200c */ 3574 if (intel_dp->active_mst_links > 0 && link_ok && 3575 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 3576 drm_dbg_kms(&i915->drm, 3577 "channel EQ not ok, retraining\n"); 3578 link_ok = false; 3579 } 3580 3581 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 3582 3583 intel_dp_mst_hpd_irq(intel_dp, esi, &handled); 3584 3585 if (!handled) 3586 break; 3587 3588 for (retry = 0; retry < 3; retry++) { 3589 int wret; 3590 3591 wret = drm_dp_dpcd_write(&intel_dp->aux, 3592 DP_SINK_COUNT_ESI+1, 3593 &esi[1], 3); 3594 if (wret == 3) 3595 break; 3596 } 3597 } 3598 3599 return link_ok; 3600 } 3601 3602 static void 3603 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) 3604 { 3605 bool is_active; 3606 u8 buf = 0; 3607 3608 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); 3609 if (intel_dp->frl.is_trained && !is_active) { 3610 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0) 3611 return; 3612 3613 buf &= ~DP_PCON_ENABLE_HDMI_LINK; 3614 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0) 3615 return; 3616 3617 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); 3618 3619 /* Restart FRL training or fall back to TMDS mode */ 3620 intel_dp_check_frl_training(intel_dp); 3621 } 3622 } 3623 3624 static bool 3625 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 3626 { 3627 u8 link_status[DP_LINK_STATUS_SIZE]; 3628 3629 if (!intel_dp->link_trained) 3630 return false; 3631 3632 /* 3633 * While PSR source HW is enabled, it will control main-link sending 3634 * frames, enabling and disabling it so trying to do a retrain will fail 3635 * 
as the link would or not be on or it could mix training patterns 3636 * and frame data at the same time causing retrain to fail. 3637 * Also when exiting PSR, HW will retrain the link anyways fixing 3638 * any link status error. 3639 */ 3640 if (intel_psr_enabled(intel_dp)) 3641 return false; 3642 3643 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 3644 link_status) < 0) 3645 return false; 3646 3647 /* 3648 * Validate the cached values of intel_dp->link_rate and 3649 * intel_dp->lane_count before attempting to retrain. 3650 * 3651 * FIXME would be nice to user the crtc state here, but since 3652 * we need to call this from the short HPD handler that seems 3653 * a bit hard. 3654 */ 3655 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 3656 intel_dp->lane_count)) 3657 return false; 3658 3659 /* Retrain if Channel EQ or CR not ok */ 3660 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 3661 } 3662 3663 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 3664 const struct drm_connector_state *conn_state) 3665 { 3666 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3667 struct intel_encoder *encoder; 3668 enum pipe pipe; 3669 3670 if (!conn_state->best_encoder) 3671 return false; 3672 3673 /* SST */ 3674 encoder = &dp_to_dig_port(intel_dp)->base; 3675 if (conn_state->best_encoder == &encoder->base) 3676 return true; 3677 3678 /* MST */ 3679 for_each_pipe(i915, pipe) { 3680 encoder = &intel_dp->mst_encoders[pipe]->base; 3681 if (conn_state->best_encoder == &encoder->base) 3682 return true; 3683 } 3684 3685 return false; 3686 } 3687 3688 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 3689 struct drm_modeset_acquire_ctx *ctx, 3690 u32 *crtc_mask) 3691 { 3692 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3693 struct drm_connector_list_iter conn_iter; 3694 struct intel_connector *connector; 3695 int ret = 0; 3696 3697 *crtc_mask = 0; 3698 3699 if 
(!intel_dp_needs_link_retrain(intel_dp)) 3700 return 0; 3701 3702 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 3703 for_each_intel_connector_iter(connector, &conn_iter) { 3704 struct drm_connector_state *conn_state = 3705 connector->base.state; 3706 struct intel_crtc_state *crtc_state; 3707 struct intel_crtc *crtc; 3708 3709 if (!intel_dp_has_connector(intel_dp, conn_state)) 3710 continue; 3711 3712 crtc = to_intel_crtc(conn_state->crtc); 3713 if (!crtc) 3714 continue; 3715 3716 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 3717 if (ret) 3718 break; 3719 3720 crtc_state = to_intel_crtc_state(crtc->base.state); 3721 3722 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 3723 3724 if (!crtc_state->hw.active) 3725 continue; 3726 3727 if (conn_state->commit && 3728 !try_wait_for_completion(&conn_state->commit->hw_done)) 3729 continue; 3730 3731 *crtc_mask |= drm_crtc_mask(&crtc->base); 3732 } 3733 drm_connector_list_iter_end(&conn_iter); 3734 3735 if (!intel_dp_needs_link_retrain(intel_dp)) 3736 *crtc_mask = 0; 3737 3738 return ret; 3739 } 3740 3741 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 3742 { 3743 struct intel_connector *connector = intel_dp->attached_connector; 3744 3745 return connector->base.status == connector_status_connected || 3746 intel_dp->is_mst; 3747 } 3748 3749 int intel_dp_retrain_link(struct intel_encoder *encoder, 3750 struct drm_modeset_acquire_ctx *ctx) 3751 { 3752 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3753 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3754 struct intel_crtc *crtc; 3755 u32 crtc_mask; 3756 int ret; 3757 3758 if (!intel_dp_is_connected(intel_dp)) 3759 return 0; 3760 3761 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 3762 ctx); 3763 if (ret) 3764 return ret; 3765 3766 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 3767 if (ret) 3768 return ret; 3769 3770 if (crtc_mask == 0) 3771 return 0; 3772 3773 
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 3774 encoder->base.base.id, encoder->base.name); 3775 3776 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3777 const struct intel_crtc_state *crtc_state = 3778 to_intel_crtc_state(crtc->base.state); 3779 3780 /* Suppress underruns caused by re-training */ 3781 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 3782 if (crtc_state->has_pch_encoder) 3783 intel_set_pch_fifo_underrun_reporting(dev_priv, 3784 intel_crtc_pch_transcoder(crtc), false); 3785 } 3786 3787 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3788 const struct intel_crtc_state *crtc_state = 3789 to_intel_crtc_state(crtc->base.state); 3790 3791 /* retrain on the MST master transcoder */ 3792 if (DISPLAY_VER(dev_priv) >= 12 && 3793 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 3794 !intel_dp_mst_is_master_trans(crtc_state)) 3795 continue; 3796 3797 intel_dp_check_frl_training(intel_dp); 3798 intel_dp_pcon_dsc_configure(intel_dp, crtc_state); 3799 intel_dp_start_link_train(intel_dp, crtc_state); 3800 intel_dp_stop_link_train(intel_dp, crtc_state); 3801 break; 3802 } 3803 3804 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3805 const struct intel_crtc_state *crtc_state = 3806 to_intel_crtc_state(crtc->base.state); 3807 3808 /* Keep underrun reporting disabled until things are stable */ 3809 intel_wait_for_vblank(dev_priv, crtc->pipe); 3810 3811 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 3812 if (crtc_state->has_pch_encoder) 3813 intel_set_pch_fifo_underrun_reporting(dev_priv, 3814 intel_crtc_pch_transcoder(crtc), true); 3815 } 3816 3817 return 0; 3818 } 3819 3820 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, 3821 struct drm_modeset_acquire_ctx *ctx, 3822 u32 *crtc_mask) 3823 { 3824 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3825 struct drm_connector_list_iter conn_iter; 3826 struct intel_connector *connector; 3827 int ret = 0; 
3828 3829 *crtc_mask = 0; 3830 3831 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 3832 for_each_intel_connector_iter(connector, &conn_iter) { 3833 struct drm_connector_state *conn_state = 3834 connector->base.state; 3835 struct intel_crtc_state *crtc_state; 3836 struct intel_crtc *crtc; 3837 3838 if (!intel_dp_has_connector(intel_dp, conn_state)) 3839 continue; 3840 3841 crtc = to_intel_crtc(conn_state->crtc); 3842 if (!crtc) 3843 continue; 3844 3845 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 3846 if (ret) 3847 break; 3848 3849 crtc_state = to_intel_crtc_state(crtc->base.state); 3850 3851 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 3852 3853 if (!crtc_state->hw.active) 3854 continue; 3855 3856 if (conn_state->commit && 3857 !try_wait_for_completion(&conn_state->commit->hw_done)) 3858 continue; 3859 3860 *crtc_mask |= drm_crtc_mask(&crtc->base); 3861 } 3862 drm_connector_list_iter_end(&conn_iter); 3863 3864 return ret; 3865 } 3866 3867 static int intel_dp_do_phy_test(struct intel_encoder *encoder, 3868 struct drm_modeset_acquire_ctx *ctx) 3869 { 3870 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3871 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3872 struct intel_crtc *crtc; 3873 u32 crtc_mask; 3874 int ret; 3875 3876 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 3877 ctx); 3878 if (ret) 3879 return ret; 3880 3881 ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask); 3882 if (ret) 3883 return ret; 3884 3885 if (crtc_mask == 0) 3886 return 0; 3887 3888 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", 3889 encoder->base.base.id, encoder->base.name); 3890 3891 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3892 const struct intel_crtc_state *crtc_state = 3893 to_intel_crtc_state(crtc->base.state); 3894 3895 /* test on the MST master transcoder */ 3896 if (DISPLAY_VER(dev_priv) >= 12 && 3897 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 3898 
!intel_dp_mst_is_master_trans(crtc_state)) 3899 continue; 3900 3901 intel_dp_process_phy_request(intel_dp, crtc_state); 3902 break; 3903 } 3904 3905 return 0; 3906 } 3907 3908 void intel_dp_phy_test(struct intel_encoder *encoder) 3909 { 3910 struct drm_modeset_acquire_ctx ctx; 3911 int ret; 3912 3913 drm_modeset_acquire_init(&ctx, 0); 3914 3915 for (;;) { 3916 ret = intel_dp_do_phy_test(encoder, &ctx); 3917 3918 if (ret == -EDEADLK) { 3919 drm_modeset_backoff(&ctx); 3920 continue; 3921 } 3922 3923 break; 3924 } 3925 3926 drm_modeset_drop_locks(&ctx); 3927 drm_modeset_acquire_fini(&ctx); 3928 drm_WARN(encoder->base.dev, ret, 3929 "Acquiring modeset locks failed with %i\n", ret); 3930 } 3931 3932 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) 3933 { 3934 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3935 u8 val; 3936 3937 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3938 return; 3939 3940 if (drm_dp_dpcd_readb(&intel_dp->aux, 3941 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 3942 return; 3943 3944 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 3945 3946 if (val & DP_AUTOMATED_TEST_REQUEST) 3947 intel_dp_handle_test_request(intel_dp); 3948 3949 if (val & DP_CP_IRQ) 3950 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 3951 3952 if (val & DP_SINK_SPECIFIC_IRQ) 3953 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 3954 } 3955 3956 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp) 3957 { 3958 u8 val; 3959 3960 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3961 return; 3962 3963 if (drm_dp_dpcd_readb(&intel_dp->aux, 3964 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) 3965 return; 3966 3967 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3968 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) 3969 return; 3970 3971 if (val & HDMI_LINK_STATUS_CHANGED) 3972 intel_dp_handle_hdmi_link_status_change(intel_dp); 3973 } 3974 3975 /* 3976 * According to DP spec 3977 * 5.1.2: 3978 * 1. Read DPCD 3979 * 2. 
Configure link according to Receiver Capabilities
 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
 * 4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP is handled by edp_detect(); it should never get here */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DP 1.1+: downstream port type lives in the DFP caps */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DP 1.0: only the coarse DOWNSTREAMPORT_PRESENT field */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are always considered connected */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* live-state read needs the display core power domain */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return a freshly allocated EDID for the attached connector, either
 * duplicated from the cached copy or read over DDC. Returns NULL if the
 * cached EDID is an error pointer. Caller owns the returned EDID.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

/*
 * Refresh the cached downstream facing port (DFP) capabilities from the
 * branch device DPCD + EDID: max bpc, dotclock, TMDS clock range and
 * PCON FRL bandwidth.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Work out how YCbCr 4:2:0 output can be achieved (passthrough,
 * 4:4:4->4:2:0 conversion in the DFP, and/or RGB->YCbCr conversion in a
 * PCON) and update the connector's ycbcr_420_allowed accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severly corrupted. SNB+ is fine.
	 */
	if (IS_IRONLAKE(i915))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);

	if (DISPLAY_VER(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
			/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 ||
				ycbcr_420_passthrough;
		}
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(intel_dp->dfp.rgb_to_ycbcr),
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * Read a fresh EDID for the connector, cache it in detect_edid, and
 * re-derive everything that depends on it (DFP caps, 4:2:0 support,
 * HDMI/audio sink flags, CEC).
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}

/*
 * Drop the cached detect-time EDID and reset all the sink state that was
 * derived from it. Counterpart of intel_dp_set_edid().
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}

/*
 * drm_connector_helper_funcs .detect_ctx hook: full connector detection,
 * including DPCD/DSC cap readout, MST (re)configuration, link retraining
 * and EDID caching. Called with connection_mutex held.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (DISPLAY_VER(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		intel_dp_reset_max_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/*
 * drm_connector_funcs .force hook: re-read the EDID for a connector whose
 * status is being forced by userspace, holding the AUX power domain.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}

/*
 * drm_connector_helper_funcs .get_modes hook: EDID modes, plus the eDP
 * fixed mode and, failing everything else, a mode derived from the
 * downstream port caps.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;
	int num_modes = 0;

	edid = intel_connector->detect_edid;
	if (edid) {
		num_modes = intel_connector_update_modes(connector, edid);

		if (intel_vrr_is_capable(connector))
			drm_connector_set_vrr_capable_property(connector,
							       true);
	}

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	if (num_modes)
		return num_modes;

	/* no EDID modes at all: fall back to the downstream port's caps */
	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}

/*
 * .late_register hook: register AUX channel and CEC, and set up LSPCON
 * (including the HDR metadata property) when the VBT says one is present.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_object_attach_property(&connector->base,
						   connector->dev->mode_config.hdr_output_metadata_property,
						   0);
	}

	return ret;
}

/* .early_unregister hook: tear down in reverse order of registration */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);
}

void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Add every connector in the given tile group (and the planes of its
 * CRTC) to the atomic state, marking the CRTCs for a full modeset.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Pull every enabled CRTC driving one of the given transcoders into the
 * atomic state and flag it for a modeset. Warns if a requested
 * transcoder was not found on any enabled CRTC.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * For a connector whose old CRTC was part of a port-sync group, bring
 * the master and all slave transcoders into the modeset.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/*
 * .atomic_check hook: on top of the generic digital connector check,
 * make sure tiled displays and port-synced CRTCs are modeset together.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

/*
 * .oob_hotplug_event hook: inject a hotplug event for this encoder's HPD
 * pin from an out-of-band source (e.g. Type-C), via the hotplug worker.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);

	spin_lock_irq(&i915->irq_lock);
	i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
	spin_unlock_irq(&i915->irq_lock);
	queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/*
 * HPD IRQ handler for this port. Long pulses flag a link-parameter reset
 * and punt to the full hotplug path (IRQ_NONE); short pulses are handled
 * inline via MST ESI processing or intel_dp_short_pulse().
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (DISPLAY_VER(dev_priv) < 5)
		return false;

	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

/* Attach the DRM properties supported by this connector/platform combo */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (DISPLAY_VER(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_is_lspcon_present(dev_priv, port)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}

/*
 * eDP-specific connector setup: PPS init, DPCD/EDID caching, fixed and
 * downclock (DRRS) mode discovery, MSO fixup, backlight and panel
 * orientation. Returns false (with VDD synced off) if the panel looks
 * like a ghost, true otherwise. A no-op returning true for non-eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_pps_init(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* cache the result (possibly an error pointer) for later detects */
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_drrs_init(intel_connector, fixed_mode);

	/* MSO requires information from the EDID */
	intel_edp_mso_init(intel_dp);

	/* multiply the mode clock and horizontal timings for MSO */
	intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
	intel_edp_mso_mode_fixup(intel_connector, downclock_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
		intel_connector->panel.backlight.power = intel_pps_backlight_power;
	intel_backlight_setup(intel_connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}

/*
 * Work item scheduled after link-training failure: mark the link BAD and
 * send a hotplug uevent so userspace reprobes and does a fresh modeset.
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

/*
 * Create and initialize the DP (or eDP) connector for a digital port:
 * link-rate setup, AUX channel, MST, eDP panel init, properties, HDCP
 * and PSR. Returns false and cleans up on failure.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_max_link_params(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

/* Suspend the MST topology managers of all MST-capable DDI encoders */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/*
 * Resume the MST topology managers of all MST-capable DDI encoders;
 * on failure the port falls back to non-MST mode.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}