/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/slab.h> 32 #include <linux/timekeeping.h> 33 #include <linux/types.h> 34 35 #include <asm/byteorder.h> 36 37 #include <drm/drm_atomic_helper.h> 38 #include <drm/drm_crtc.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_edid.h> 41 #include <drm/drm_probe_helper.h> 42 43 #include "g4x_dp.h" 44 #include "i915_debugfs.h" 45 #include "i915_drv.h" 46 #include "intel_atomic.h" 47 #include "intel_audio.h" 48 #include "intel_backlight.h" 49 #include "intel_connector.h" 50 #include "intel_ddi.h" 51 #include "intel_de.h" 52 #include "intel_display_types.h" 53 #include "intel_dp.h" 54 #include "intel_dp_aux.h" 55 #include "intel_dp_hdcp.h" 56 #include "intel_dp_link_training.h" 57 #include "intel_dp_mst.h" 58 #include "intel_dpio_phy.h" 59 #include "intel_dpll.h" 60 #include "intel_drrs.h" 61 #include "intel_fifo_underrun.h" 62 #include "intel_hdcp.h" 63 #include "intel_hdmi.h" 64 #include "intel_hotplug.h" 65 #include "intel_lspcon.h" 66 #include "intel_lvds.h" 67 #include "intel_panel.h" 68 #include "intel_pps.h" 69 #include "intel_psr.h" 70 #include "intel_tc.h" 71 #include "intel_vdsc.h" 72 #include "intel_vrr.h" 73 74 #define DP_DPRX_ESI_LEN 14 75 76 /* DP DSC throughput values used for slice count calculations KPixels/s */ 77 #define DP_DSC_PEAK_PIXEL_RATE 2720000 78 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 79 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 80 81 /* DP DSC FEC Overhead factor = 1/(0.972261) */ 82 #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 83 84 /* Compliance test status bits */ 85 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 86 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 87 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 88 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 89 90 91 /* Constants 
for DP DSC configurations */ 92 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; 93 94 /* With Single pipe configuration, HW is capable of supporting maximum 95 * of 4 slices per line. 96 */ 97 static const u8 valid_dsc_slicecount[] = {1, 2, 4}; 98 99 /** 100 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 101 * @intel_dp: DP struct 102 * 103 * If a CPU or PCH DP output is attached to an eDP panel, this function 104 * will return true, and false otherwise. 105 * 106 * This function is not safe to use prior to encoder type being set. 107 */ 108 bool intel_dp_is_edp(struct intel_dp *intel_dp) 109 { 110 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 111 112 return dig_port->base.type == INTEL_OUTPUT_EDP; 113 } 114 115 static void intel_dp_unset_edid(struct intel_dp *intel_dp); 116 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc); 117 118 /* Is link rate UHBR and thus 128b/132b? */ 119 bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) 120 { 121 return crtc_state->port_clock >= 1000000; 122 } 123 124 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) 125 { 126 intel_dp->sink_rates[0] = 162000; 127 intel_dp->num_sink_rates = 1; 128 } 129 130 /* update sink rates from dpcd */ 131 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 132 { 133 static const int dp_rates[] = { 134 162000, 270000, 540000, 810000 135 }; 136 int i, max_rate; 137 int max_lttpr_rate; 138 139 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { 140 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ 141 static const int quirk_rates[] = { 162000, 270000, 324000 }; 142 143 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); 144 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); 145 146 return; 147 } 148 149 /* 150 * Sink rates for 8b/10b. 
151 */ 152 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 153 max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); 154 if (max_lttpr_rate) 155 max_rate = min(max_rate, max_lttpr_rate); 156 157 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { 158 if (dp_rates[i] > max_rate) 159 break; 160 intel_dp->sink_rates[i] = dp_rates[i]; 161 } 162 163 /* 164 * Sink rates for 128b/132b. If set, sink should support all 8b/10b 165 * rates and 10 Gbps. 166 */ 167 if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) { 168 u8 uhbr_rates = 0; 169 170 BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3); 171 172 drm_dp_dpcd_readb(&intel_dp->aux, 173 DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates); 174 175 if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) { 176 /* We have a repeater */ 177 if (intel_dp->lttpr_common_caps[0] >= 0x20 && 178 intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - 179 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] & 180 DP_PHY_REPEATER_128B132B_SUPPORTED) { 181 /* Repeater supports 128b/132b, valid UHBR rates */ 182 uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES - 183 DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; 184 } else { 185 /* Does not support 128b/132b */ 186 uhbr_rates = 0; 187 } 188 } 189 190 if (uhbr_rates & DP_UHBR10) 191 intel_dp->sink_rates[i++] = 1000000; 192 if (uhbr_rates & DP_UHBR13_5) 193 intel_dp->sink_rates[i++] = 1350000; 194 if (uhbr_rates & DP_UHBR20) 195 intel_dp->sink_rates[i++] = 2000000; 196 } 197 198 intel_dp->num_sink_rates = i; 199 } 200 201 /* Get length of rates array potentially limited by max_rate. 
*/ 202 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 203 { 204 int i; 205 206 /* Limit results by potentially reduced max rate */ 207 for (i = 0; i < len; i++) { 208 if (rates[len - i - 1] <= max_rate) 209 return len - i; 210 } 211 212 return 0; 213 } 214 215 /* Get length of common rates array potentially limited by max_rate. */ 216 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 217 int max_rate) 218 { 219 return intel_dp_rate_limit_len(intel_dp->common_rates, 220 intel_dp->num_common_rates, max_rate); 221 } 222 223 /* Theoretical max between source and sink */ 224 static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 225 { 226 return intel_dp->common_rates[intel_dp->num_common_rates - 1]; 227 } 228 229 /* Theoretical max between source and sink */ 230 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 231 { 232 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 233 int source_max = dig_port->max_lanes; 234 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 235 int fia_max = intel_tc_port_fia_max_lane_count(dig_port); 236 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); 237 238 if (lttpr_max) 239 sink_max = min(sink_max, lttpr_max); 240 241 return min3(source_max, sink_max, fia_max); 242 } 243 244 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 245 { 246 return intel_dp->max_link_lane_count; 247 } 248 249 /* 250 * The required data bandwidth for a mode with given pixel clock and bpp. This 251 * is the required net bandwidth independent of the data bandwidth efficiency. 252 */ 253 int 254 intel_dp_link_required(int pixel_clock, int bpp) 255 { 256 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 257 return DIV_ROUND_UP(pixel_clock * bpp, 8); 258 } 259 260 /* 261 * Given a link rate and lanes, get the data bandwidth. 
262 * 263 * Data bandwidth is the actual payload rate, which depends on the data 264 * bandwidth efficiency and the link rate. 265 * 266 * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency 267 * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = 268 * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by 269 * coincidence, the port clock in kHz matches the data bandwidth in kBps, and 270 * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no 271 * longer holds for data bandwidth as soon as FEC or MST is taken into account!) 272 * 273 * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For 274 * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 275 * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 276 * does not match the symbol clock, the port clock (not even if you think in 277 * terms of a byte clock), nor the data bandwidth. It only matches the link bit 278 * rate in units of 10000 bps. 279 */ 280 int 281 intel_dp_max_data_rate(int max_link_rate, int max_lanes) 282 { 283 if (max_link_rate >= 1000000) { 284 /* 285 * UHBR rates always use 128b/132b channel encoding, and have 286 * 97.71% data bandwidth efficiency. Consider max_link_rate the 287 * link bit rate in units of 10000 bps. 288 */ 289 int max_link_rate_kbps = max_link_rate * 10; 290 291 max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000); 292 max_link_rate = max_link_rate_kbps / 8; 293 } 294 295 /* 296 * Lower than UHBR rates always use 8b/10b channel encoding, and have 297 * 80% data bandwidth efficiency for SST non-FEC. 
However, this turns 298 * out to be a nop by coincidence, and can be skipped: 299 * 300 * int max_link_rate_kbps = max_link_rate * 10; 301 * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10); 302 * max_link_rate = max_link_rate_kbps / 8; 303 */ 304 305 return max_link_rate * max_lanes; 306 } 307 308 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) 309 { 310 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 311 struct intel_encoder *encoder = &intel_dig_port->base; 312 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 313 314 return DISPLAY_VER(dev_priv) >= 12 || 315 (DISPLAY_VER(dev_priv) == 11 && 316 encoder->port != PORT_A); 317 } 318 319 static int dg2_max_source_rate(struct intel_dp *intel_dp) 320 { 321 return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; 322 } 323 324 static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) 325 { 326 u32 voltage; 327 328 voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; 329 330 return voltage == VOLTAGE_INFO_0_85V; 331 } 332 333 static int icl_max_source_rate(struct intel_dp *intel_dp) 334 { 335 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 336 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 337 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 338 339 if (intel_phy_is_combo(dev_priv, phy) && 340 (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) 341 return 540000; 342 343 return 810000; 344 } 345 346 static int ehl_max_source_rate(struct intel_dp *intel_dp) 347 { 348 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 349 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 350 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 351 352 if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) 353 return 540000; 354 355 return 810000; 356 } 357 358 static int dg1_max_source_rate(struct intel_dp 
*intel_dp) 359 { 360 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 361 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 362 enum phy phy = intel_port_to_phy(i915, dig_port->base.port); 363 364 if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) 365 return 540000; 366 367 return 810000; 368 } 369 370 static void 371 intel_dp_set_source_rates(struct intel_dp *intel_dp) 372 { 373 /* The values must be in increasing order */ 374 static const int icl_rates[] = { 375 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, 376 1000000, 1350000, 377 }; 378 static const int bxt_rates[] = { 379 162000, 216000, 243000, 270000, 324000, 432000, 540000 380 }; 381 static const int skl_rates[] = { 382 162000, 216000, 270000, 324000, 432000, 540000 383 }; 384 static const int hsw_rates[] = { 385 162000, 270000, 540000 386 }; 387 static const int g4x_rates[] = { 388 162000, 270000 389 }; 390 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 391 struct intel_encoder *encoder = &dig_port->base; 392 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 393 const int *source_rates; 394 int size, max_rate = 0, vbt_max_rate; 395 396 /* This should only be done once */ 397 drm_WARN_ON(&dev_priv->drm, 398 intel_dp->source_rates || intel_dp->num_source_rates); 399 400 if (DISPLAY_VER(dev_priv) >= 11) { 401 source_rates = icl_rates; 402 size = ARRAY_SIZE(icl_rates); 403 if (IS_DG2(dev_priv)) 404 max_rate = dg2_max_source_rate(intel_dp); 405 else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || 406 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 407 max_rate = dg1_max_source_rate(intel_dp); 408 else if (IS_JSL_EHL(dev_priv)) 409 max_rate = ehl_max_source_rate(intel_dp); 410 else 411 max_rate = icl_max_source_rate(intel_dp); 412 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 413 source_rates = bxt_rates; 414 size = ARRAY_SIZE(bxt_rates); 415 } else if (DISPLAY_VER(dev_priv) == 9) { 
416 source_rates = skl_rates; 417 size = ARRAY_SIZE(skl_rates); 418 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 419 IS_BROADWELL(dev_priv)) { 420 source_rates = hsw_rates; 421 size = ARRAY_SIZE(hsw_rates); 422 } else { 423 source_rates = g4x_rates; 424 size = ARRAY_SIZE(g4x_rates); 425 } 426 427 vbt_max_rate = intel_bios_dp_max_link_rate(encoder); 428 if (max_rate && vbt_max_rate) 429 max_rate = min(max_rate, vbt_max_rate); 430 else if (vbt_max_rate) 431 max_rate = vbt_max_rate; 432 433 if (max_rate) 434 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 435 436 intel_dp->source_rates = source_rates; 437 intel_dp->num_source_rates = size; 438 } 439 440 static int intersect_rates(const int *source_rates, int source_len, 441 const int *sink_rates, int sink_len, 442 int *common_rates) 443 { 444 int i = 0, j = 0, k = 0; 445 446 while (i < source_len && j < sink_len) { 447 if (source_rates[i] == sink_rates[j]) { 448 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 449 return k; 450 common_rates[k] = source_rates[i]; 451 ++k; 452 ++i; 453 ++j; 454 } else if (source_rates[i] < sink_rates[j]) { 455 ++i; 456 } else { 457 ++j; 458 } 459 } 460 return k; 461 } 462 463 /* return index of rate in rates array, or -1 if not found */ 464 static int intel_dp_rate_index(const int *rates, int len, int rate) 465 { 466 int i; 467 468 for (i = 0; i < len; i++) 469 if (rate == rates[i]) 470 return i; 471 472 return -1; 473 } 474 475 static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 476 { 477 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 478 479 drm_WARN_ON(&i915->drm, 480 !intel_dp->num_source_rates || !intel_dp->num_sink_rates); 481 482 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 483 intel_dp->num_source_rates, 484 intel_dp->sink_rates, 485 intel_dp->num_sink_rates, 486 intel_dp->common_rates); 487 488 /* Paranoia, there should always be something in common. 
*/ 489 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { 490 intel_dp->common_rates[0] = 162000; 491 intel_dp->num_common_rates = 1; 492 } 493 } 494 495 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 496 u8 lane_count) 497 { 498 /* 499 * FIXME: we need to synchronize the current link parameters with 500 * hardware readout. Currently fast link training doesn't work on 501 * boot-up. 502 */ 503 if (link_rate == 0 || 504 link_rate > intel_dp->max_link_rate) 505 return false; 506 507 if (lane_count == 0 || 508 lane_count > intel_dp_max_lane_count(intel_dp)) 509 return false; 510 511 return true; 512 } 513 514 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, 515 int link_rate, 516 u8 lane_count) 517 { 518 const struct drm_display_mode *fixed_mode = 519 intel_dp->attached_connector->panel.fixed_mode; 520 int mode_rate, max_rate; 521 522 mode_rate = intel_dp_link_required(fixed_mode->clock, 18); 523 max_rate = intel_dp_max_data_rate(link_rate, lane_count); 524 if (mode_rate > max_rate) 525 return false; 526 527 return true; 528 } 529 530 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, 531 int link_rate, u8 lane_count) 532 { 533 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 534 int index; 535 536 /* 537 * TODO: Enable fallback on MST links once MST link compute can handle 538 * the fallback params. 
539 */ 540 if (intel_dp->is_mst) { 541 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 542 return -1; 543 } 544 545 if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) { 546 drm_dbg_kms(&i915->drm, 547 "Retrying Link training for eDP with max parameters\n"); 548 intel_dp->use_max_params = true; 549 return 0; 550 } 551 552 index = intel_dp_rate_index(intel_dp->common_rates, 553 intel_dp->num_common_rates, 554 link_rate); 555 if (index > 0) { 556 if (intel_dp_is_edp(intel_dp) && 557 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 558 intel_dp->common_rates[index - 1], 559 lane_count)) { 560 drm_dbg_kms(&i915->drm, 561 "Retrying Link training for eDP with same parameters\n"); 562 return 0; 563 } 564 intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; 565 intel_dp->max_link_lane_count = lane_count; 566 } else if (lane_count > 1) { 567 if (intel_dp_is_edp(intel_dp) && 568 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 569 intel_dp_max_common_rate(intel_dp), 570 lane_count >> 1)) { 571 drm_dbg_kms(&i915->drm, 572 "Retrying Link training for eDP with same parameters\n"); 573 return 0; 574 } 575 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 576 intel_dp->max_link_lane_count = lane_count >> 1; 577 } else { 578 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 579 return -1; 580 } 581 582 return 0; 583 } 584 585 u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 586 { 587 return div_u64(mul_u32_u32(mode_clock, 1000000U), 588 DP_DSC_FEC_OVERHEAD_FACTOR); 589 } 590 591 static int 592 small_joiner_ram_size_bits(struct drm_i915_private *i915) 593 { 594 if (DISPLAY_VER(i915) >= 13) 595 return 17280 * 8; 596 else if (DISPLAY_VER(i915) >= 11) 597 return 7680 * 8; 598 else 599 return 6144 * 8; 600 } 601 602 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, 603 u32 link_clock, u32 lane_count, 604 u32 mode_clock, u32 mode_hdisplay, 605 bool bigjoiner, 606 u32 pipe_bpp) 607 { 608 u32 bits_per_pixel, 
max_bpp_small_joiner_ram; 609 int i; 610 611 /* 612 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 613 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP) 614 * for SST -> TimeSlotsPerMTP is 1, 615 * for MST -> TimeSlotsPerMTP has to be calculated 616 */ 617 bits_per_pixel = (link_clock * lane_count * 8) / 618 intel_dp_mode_to_fec_clock(mode_clock); 619 drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel); 620 621 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ 622 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / 623 mode_hdisplay; 624 625 if (bigjoiner) 626 max_bpp_small_joiner_ram *= 2; 627 628 drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n", 629 max_bpp_small_joiner_ram); 630 631 /* 632 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW 633 * check, output bpp from small joiner RAM check) 634 */ 635 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); 636 637 if (bigjoiner) { 638 u32 max_bpp_bigjoiner = 639 i915->max_cdclk_freq * 48 / 640 intel_dp_mode_to_fec_clock(mode_clock); 641 642 DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner); 643 bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); 644 } 645 646 /* Error out if the max bpp is less than smallest allowed valid bpp */ 647 if (bits_per_pixel < valid_dsc_bpp[0]) { 648 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", 649 bits_per_pixel, valid_dsc_bpp[0]); 650 return 0; 651 } 652 653 /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */ 654 if (DISPLAY_VER(i915) >= 13) { 655 bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1); 656 } else { 657 /* Find the nearest match in the array of known BPPs from VESA */ 658 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 659 if (bits_per_pixel < valid_dsc_bpp[i + 1]) 660 break; 661 } 662 bits_per_pixel = valid_dsc_bpp[i]; 663 } 664 665 /* 666 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, 667 * fractional part is 0 668 */ 669 
return bits_per_pixel << 4; 670 } 671 672 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, 673 int mode_clock, int mode_hdisplay, 674 bool bigjoiner) 675 { 676 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 677 u8 min_slice_count, i; 678 int max_slice_width; 679 680 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 681 min_slice_count = DIV_ROUND_UP(mode_clock, 682 DP_DSC_MAX_ENC_THROUGHPUT_0); 683 else 684 min_slice_count = DIV_ROUND_UP(mode_clock, 685 DP_DSC_MAX_ENC_THROUGHPUT_1); 686 687 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); 688 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 689 drm_dbg_kms(&i915->drm, 690 "Unsupported slice width %d by DP DSC Sink device\n", 691 max_slice_width); 692 return 0; 693 } 694 /* Also take into account max slice width */ 695 min_slice_count = max_t(u8, min_slice_count, 696 DIV_ROUND_UP(mode_hdisplay, 697 max_slice_width)); 698 699 /* Find the closest match to the valid slice count values */ 700 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { 701 u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner; 702 703 if (test_slice_count > 704 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false)) 705 break; 706 707 /* big joiner needs small joiner to be enabled */ 708 if (bigjoiner && test_slice_count < 4) 709 continue; 710 711 if (min_slice_count <= test_slice_count) 712 return test_slice_count; 713 } 714 715 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", 716 min_slice_count); 717 return 0; 718 } 719 720 static enum intel_output_format 721 intel_dp_output_format(struct drm_connector *connector, 722 const struct drm_display_mode *mode) 723 { 724 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 725 const struct drm_display_info *info = &connector->display_info; 726 727 if (!connector->ycbcr_420_allowed || 728 !drm_mode_is_420_only(info, mode)) 729 return INTEL_OUTPUT_FORMAT_RGB; 730 731 if (intel_dp->dfp.rgb_to_ycbcr && 732 
intel_dp->dfp.ycbcr_444_to_420) 733 return INTEL_OUTPUT_FORMAT_RGB; 734 735 if (intel_dp->dfp.ycbcr_444_to_420) 736 return INTEL_OUTPUT_FORMAT_YCBCR444; 737 else 738 return INTEL_OUTPUT_FORMAT_YCBCR420; 739 } 740 741 int intel_dp_min_bpp(enum intel_output_format output_format) 742 { 743 if (output_format == INTEL_OUTPUT_FORMAT_RGB) 744 return 6 * 3; 745 else 746 return 8 * 3; 747 } 748 749 static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) 750 { 751 /* 752 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 753 * format of the number of bytes per pixel will be half the number 754 * of bytes of RGB pixel. 755 */ 756 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 757 bpp /= 2; 758 759 return bpp; 760 } 761 762 static int 763 intel_dp_mode_min_output_bpp(struct drm_connector *connector, 764 const struct drm_display_mode *mode) 765 { 766 enum intel_output_format output_format = 767 intel_dp_output_format(connector, mode); 768 769 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); 770 } 771 772 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, 773 int hdisplay) 774 { 775 /* 776 * Older platforms don't like hdisplay==4096 with DP. 777 * 778 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline 779 * and frame counter increment), but we don't get vblank interrupts, 780 * and the pipe underruns immediately. The link also doesn't seem 781 * to get trained properly. 782 * 783 * On CHV the vblank interrupts don't seem to disappear but 784 * otherwise the symptoms are similar. 
785 * 786 * TODO: confirm the behaviour on HSW+ 787 */ 788 return hdisplay == 4096 && !HAS_DDI(dev_priv); 789 } 790 791 static enum drm_mode_status 792 intel_dp_mode_valid_downstream(struct intel_connector *connector, 793 const struct drm_display_mode *mode, 794 int target_clock) 795 { 796 struct intel_dp *intel_dp = intel_attached_dp(connector); 797 const struct drm_display_info *info = &connector->base.display_info; 798 int tmds_clock; 799 800 /* If PCON supports FRL MODE, check FRL bandwidth constraints */ 801 if (intel_dp->dfp.pcon_max_frl_bw) { 802 int target_bw; 803 int max_frl_bw; 804 int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode); 805 806 target_bw = bpp * target_clock; 807 808 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 809 810 /* converting bw from Gbps to Kbps*/ 811 max_frl_bw = max_frl_bw * 1000000; 812 813 if (target_bw > max_frl_bw) 814 return MODE_CLOCK_HIGH; 815 816 return MODE_OK; 817 } 818 819 if (intel_dp->dfp.max_dotclock && 820 target_clock > intel_dp->dfp.max_dotclock) 821 return MODE_CLOCK_HIGH; 822 823 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ 824 tmds_clock = target_clock; 825 if (drm_mode_is_420_only(info, mode)) 826 tmds_clock /= 2; 827 828 if (intel_dp->dfp.min_tmds_clock && 829 tmds_clock < intel_dp->dfp.min_tmds_clock) 830 return MODE_CLOCK_LOW; 831 if (intel_dp->dfp.max_tmds_clock && 832 tmds_clock > intel_dp->dfp.max_tmds_clock) 833 return MODE_CLOCK_HIGH; 834 835 return MODE_OK; 836 } 837 838 static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, 839 int hdisplay, int clock) 840 { 841 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 842 843 if (!intel_dp_can_bigjoiner(intel_dp)) 844 return false; 845 846 return clock > i915->max_dotclk_freq || hdisplay > 5120; 847 } 848 849 static enum drm_mode_status 850 intel_dp_mode_valid(struct drm_connector *connector, 851 struct drm_display_mode *mode) 852 { 853 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 854 struct 
intel_connector *intel_connector = to_intel_connector(connector); 855 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 856 struct drm_i915_private *dev_priv = to_i915(connector->dev); 857 int target_clock = mode->clock; 858 int max_rate, mode_rate, max_lanes, max_link_clock; 859 int max_dotclk = dev_priv->max_dotclk_freq; 860 u16 dsc_max_output_bpp = 0; 861 u8 dsc_slice_count = 0; 862 enum drm_mode_status status; 863 bool dsc = false, bigjoiner = false; 864 865 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 866 return MODE_NO_DBLESCAN; 867 868 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 869 return MODE_H_ILLEGAL; 870 871 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 872 status = intel_panel_mode_valid(intel_connector, mode); 873 if (status != MODE_OK) 874 return status; 875 876 target_clock = fixed_mode->clock; 877 } 878 879 if (mode->clock < 10000) 880 return MODE_CLOCK_LOW; 881 882 if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) { 883 bigjoiner = true; 884 max_dotclk *= 2; 885 } 886 if (target_clock > max_dotclk) 887 return MODE_CLOCK_HIGH; 888 889 max_link_clock = intel_dp_max_link_rate(intel_dp); 890 max_lanes = intel_dp_max_lane_count(intel_dp); 891 892 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 893 mode_rate = intel_dp_link_required(target_clock, 894 intel_dp_mode_min_output_bpp(connector, mode)); 895 896 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) 897 return MODE_H_ILLEGAL; 898 899 /* 900 * Output bpp is stored in 6.4 format so right shift by 4 to get the 901 * integer value since we support only integer values of bpp. 
902 */ 903 if (DISPLAY_VER(dev_priv) >= 10 && 904 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { 905 /* 906 * TBD pass the connector BPC, 907 * for now U8_MAX so that max BPC on that platform would be picked 908 */ 909 int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX); 910 911 if (intel_dp_is_edp(intel_dp)) { 912 dsc_max_output_bpp = 913 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; 914 dsc_slice_count = 915 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 916 true); 917 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { 918 dsc_max_output_bpp = 919 intel_dp_dsc_get_output_bpp(dev_priv, 920 max_link_clock, 921 max_lanes, 922 target_clock, 923 mode->hdisplay, 924 bigjoiner, 925 pipe_bpp) >> 4; 926 dsc_slice_count = 927 intel_dp_dsc_get_slice_count(intel_dp, 928 target_clock, 929 mode->hdisplay, 930 bigjoiner); 931 } 932 933 dsc = dsc_max_output_bpp && dsc_slice_count; 934 } 935 936 /* 937 * Big joiner configuration needs DSC for TGL which is not true for 938 * XE_LPD where uncompressed joiner is supported. 939 */ 940 if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) 941 return MODE_CLOCK_HIGH; 942 943 if (mode_rate > max_rate && !dsc) 944 return MODE_CLOCK_HIGH; 945 946 status = intel_dp_mode_valid_downstream(intel_connector, 947 mode, target_clock); 948 if (status != MODE_OK) 949 return status; 950 951 return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); 952 } 953 954 bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) 955 { 956 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); 957 } 958 959 bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) 960 { 961 return DISPLAY_VER(i915) >= 10; 962 } 963 964 static void snprintf_int_array(char *str, size_t len, 965 const int *array, int nelem) 966 { 967 int i; 968 969 str[0] = '\0'; 970 971 for (i = 0; i < nelem; i++) { 972 int r = snprintf(str, len, "%s%d", i ? 
", " : "", array[i]); 973 if (r >= len) 974 return; 975 str += r; 976 len -= r; 977 } 978 } 979 980 static void intel_dp_print_rates(struct intel_dp *intel_dp) 981 { 982 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 983 char str[128]; /* FIXME: too big for stack? */ 984 985 if (!drm_debug_enabled(DRM_UT_KMS)) 986 return; 987 988 snprintf_int_array(str, sizeof(str), 989 intel_dp->source_rates, intel_dp->num_source_rates); 990 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 991 992 snprintf_int_array(str, sizeof(str), 993 intel_dp->sink_rates, intel_dp->num_sink_rates); 994 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 995 996 snprintf_int_array(str, sizeof(str), 997 intel_dp->common_rates, intel_dp->num_common_rates); 998 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 999 } 1000 1001 int 1002 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1003 { 1004 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1005 int len; 1006 1007 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1008 if (drm_WARN_ON(&i915->drm, len <= 0)) 1009 return 162000; 1010 1011 return intel_dp->common_rates[len - 1]; 1012 } 1013 1014 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1015 { 1016 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1017 int i = intel_dp_rate_index(intel_dp->sink_rates, 1018 intel_dp->num_sink_rates, rate); 1019 1020 if (drm_WARN_ON(&i915->drm, i < 0)) 1021 i = 0; 1022 1023 return i; 1024 } 1025 1026 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1027 u8 *link_bw, u8 *rate_select) 1028 { 1029 /* eDP 1.4 rate select method. 
*/ 1030 if (intel_dp->use_rate_select) { 1031 *link_bw = 0; 1032 *rate_select = 1033 intel_dp_rate_select(intel_dp, port_clock); 1034 } else { 1035 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1036 *rate_select = 0; 1037 } 1038 } 1039 1040 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1041 const struct intel_crtc_state *pipe_config) 1042 { 1043 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1044 1045 /* On TGL, FEC is supported on all Pipes */ 1046 if (DISPLAY_VER(dev_priv) >= 12) 1047 return true; 1048 1049 if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A) 1050 return true; 1051 1052 return false; 1053 } 1054 1055 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1056 const struct intel_crtc_state *pipe_config) 1057 { 1058 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1059 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1060 } 1061 1062 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1063 const struct intel_crtc_state *crtc_state) 1064 { 1065 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) 1066 return false; 1067 1068 return intel_dsc_source_support(crtc_state) && 1069 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1070 } 1071 1072 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, 1073 const struct intel_crtc_state *crtc_state) 1074 { 1075 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 1076 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 1077 intel_dp->dfp.ycbcr_444_to_420); 1078 } 1079 1080 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, 1081 const struct intel_crtc_state *crtc_state, int bpc) 1082 { 1083 int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; 1084 1085 if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) 1086 clock /= 2; 1087 1088 return clock; 1089 } 1090 1091 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, 1092 const 
struct intel_crtc_state *crtc_state, int bpc) 1093 { 1094 int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc); 1095 1096 if (intel_dp->dfp.min_tmds_clock && 1097 tmds_clock < intel_dp->dfp.min_tmds_clock) 1098 return false; 1099 1100 if (intel_dp->dfp.max_tmds_clock && 1101 tmds_clock > intel_dp->dfp.max_tmds_clock) 1102 return false; 1103 1104 return true; 1105 } 1106 1107 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, 1108 const struct intel_crtc_state *crtc_state, 1109 int bpc) 1110 { 1111 1112 return intel_hdmi_deep_color_possible(crtc_state, bpc, 1113 intel_dp->has_hdmi_sink, 1114 intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && 1115 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); 1116 } 1117 1118 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 1119 const struct intel_crtc_state *crtc_state) 1120 { 1121 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1122 struct intel_connector *intel_connector = intel_dp->attached_connector; 1123 int bpp, bpc; 1124 1125 bpc = crtc_state->pipe_bpp / 3; 1126 1127 if (intel_dp->dfp.max_bpc) 1128 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 1129 1130 if (intel_dp->dfp.min_tmds_clock) { 1131 for (; bpc >= 10; bpc -= 2) { 1132 if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) 1133 break; 1134 } 1135 } 1136 1137 bpp = bpc * 3; 1138 if (intel_dp_is_edp(intel_dp)) { 1139 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1140 if (intel_connector->base.display_info.bpc == 0 && 1141 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 1142 drm_dbg_kms(&dev_priv->drm, 1143 "clamping bpp for eDP panel to BIOS-provided %i\n", 1144 dev_priv->vbt.edp.bpp); 1145 bpp = dev_priv->vbt.edp.bpp; 1146 } 1147 } 1148 1149 return bpp; 1150 } 1151 1152 /* Adjust link config limits based on compliance test requests. 
*/ 1153 void 1154 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1155 struct intel_crtc_state *pipe_config, 1156 struct link_config_limits *limits) 1157 { 1158 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1159 1160 /* For DP Compliance we override the computed bpp for the pipe */ 1161 if (intel_dp->compliance.test_data.bpc != 0) { 1162 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1163 1164 limits->min_bpp = limits->max_bpp = bpp; 1165 pipe_config->dither_force_disable = bpp == 6 * 3; 1166 1167 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1168 } 1169 1170 /* Use values requested by Compliance Test Request */ 1171 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1172 int index; 1173 1174 /* Validate the compliance test data since max values 1175 * might have changed due to link train fallback. 1176 */ 1177 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1178 intel_dp->compliance.test_lane_count)) { 1179 index = intel_dp_rate_index(intel_dp->common_rates, 1180 intel_dp->num_common_rates, 1181 intel_dp->compliance.test_link_rate); 1182 if (index >= 0) 1183 limits->min_rate = limits->max_rate = 1184 intel_dp->compliance.test_link_rate; 1185 limits->min_lane_count = limits->max_lane_count = 1186 intel_dp->compliance.test_lane_count; 1187 } 1188 } 1189 } 1190 1191 /* Optimize link config in order: max bpp, min clock, min lanes */ 1192 static int 1193 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 1194 struct intel_crtc_state *pipe_config, 1195 const struct link_config_limits *limits) 1196 { 1197 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1198 int bpp, i, lane_count; 1199 int mode_rate, link_rate, link_avail; 1200 1201 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 1202 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1203 1204 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 1205 
output_bpp); 1206 1207 for (i = 0; i < intel_dp->num_common_rates; i++) { 1208 link_rate = intel_dp->common_rates[i]; 1209 if (link_rate < limits->min_rate || 1210 link_rate > limits->max_rate) 1211 continue; 1212 1213 for (lane_count = limits->min_lane_count; 1214 lane_count <= limits->max_lane_count; 1215 lane_count <<= 1) { 1216 link_avail = intel_dp_max_data_rate(link_rate, 1217 lane_count); 1218 1219 if (mode_rate <= link_avail) { 1220 pipe_config->lane_count = lane_count; 1221 pipe_config->pipe_bpp = bpp; 1222 pipe_config->port_clock = link_rate; 1223 1224 return 0; 1225 } 1226 } 1227 } 1228 } 1229 1230 return -EINVAL; 1231 } 1232 1233 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc) 1234 { 1235 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1236 int i, num_bpc; 1237 u8 dsc_bpc[3] = {0}; 1238 u8 dsc_max_bpc; 1239 1240 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1241 if (DISPLAY_VER(i915) >= 12) 1242 dsc_max_bpc = min_t(u8, 12, max_req_bpc); 1243 else 1244 dsc_max_bpc = min_t(u8, 10, max_req_bpc); 1245 1246 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 1247 dsc_bpc); 1248 for (i = 0; i < num_bpc; i++) { 1249 if (dsc_max_bpc >= dsc_bpc[i]) 1250 return dsc_bpc[i] * 3; 1251 } 1252 1253 return 0; 1254 } 1255 1256 #define DSC_SUPPORTED_VERSION_MIN 1 1257 1258 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 1259 struct intel_crtc_state *crtc_state) 1260 { 1261 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1262 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1263 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1264 u8 line_buf_depth; 1265 int ret; 1266 1267 /* 1268 * RC_MODEL_SIZE is currently a constant across all configurations. 1269 * 1270 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1271 * DP_DSC_RC_BUF_SIZE for this. 
1272 */ 1273 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1274 1275 /* 1276 * Slice Height of 8 works for all currently available panels. So start 1277 * with that if pic_height is an integral multiple of 8. Eventually add 1278 * logic to try multiple slice heights. 1279 */ 1280 if (vdsc_cfg->pic_height % 8 == 0) 1281 vdsc_cfg->slice_height = 8; 1282 else if (vdsc_cfg->pic_height % 4 == 0) 1283 vdsc_cfg->slice_height = 4; 1284 else 1285 vdsc_cfg->slice_height = 2; 1286 1287 ret = intel_dsc_compute_params(encoder, crtc_state); 1288 if (ret) 1289 return ret; 1290 1291 vdsc_cfg->dsc_version_major = 1292 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1293 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1294 vdsc_cfg->dsc_version_minor = 1295 min(DSC_SUPPORTED_VERSION_MIN, 1296 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1297 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 1298 1299 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1300 DP_DSC_RGB; 1301 1302 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 1303 if (!line_buf_depth) { 1304 drm_dbg_kms(&i915->drm, 1305 "DSC Sink Line Buffer Depth invalid\n"); 1306 return -EINVAL; 1307 } 1308 1309 if (vdsc_cfg->dsc_version_minor == 2) 1310 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 1311 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 1312 else 1313 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 
1314 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 1315 1316 vdsc_cfg->block_pred_enable = 1317 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1318 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1319 1320 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1321 } 1322 1323 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 1324 struct intel_crtc_state *pipe_config, 1325 struct drm_connector_state *conn_state, 1326 struct link_config_limits *limits) 1327 { 1328 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1329 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 1330 const struct drm_display_mode *adjusted_mode = 1331 &pipe_config->hw.adjusted_mode; 1332 int pipe_bpp; 1333 int ret; 1334 1335 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 1336 intel_dp_supports_fec(intel_dp, pipe_config); 1337 1338 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 1339 return -EINVAL; 1340 1341 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc); 1342 1343 /* Min Input BPC for ICL+ is 8 */ 1344 if (pipe_bpp < 8 * 3) { 1345 drm_dbg_kms(&dev_priv->drm, 1346 "No DSC support for less than 8bpc\n"); 1347 return -EINVAL; 1348 } 1349 1350 /* 1351 * For now enable DSC for max bpp, max link rate, max lane count. 1352 * Optimize this later for the minimum possible link rate/lane count 1353 * with DSC enabled for the requested mode. 
1354 */ 1355 pipe_config->pipe_bpp = pipe_bpp; 1356 pipe_config->port_clock = limits->max_rate; 1357 pipe_config->lane_count = limits->max_lane_count; 1358 1359 if (intel_dp_is_edp(intel_dp)) { 1360 pipe_config->dsc.compressed_bpp = 1361 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 1362 pipe_config->pipe_bpp); 1363 pipe_config->dsc.slice_count = 1364 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 1365 true); 1366 } else { 1367 u16 dsc_max_output_bpp; 1368 u8 dsc_dp_slice_count; 1369 1370 dsc_max_output_bpp = 1371 intel_dp_dsc_get_output_bpp(dev_priv, 1372 pipe_config->port_clock, 1373 pipe_config->lane_count, 1374 adjusted_mode->crtc_clock, 1375 adjusted_mode->crtc_hdisplay, 1376 pipe_config->bigjoiner, 1377 pipe_bpp); 1378 dsc_dp_slice_count = 1379 intel_dp_dsc_get_slice_count(intel_dp, 1380 adjusted_mode->crtc_clock, 1381 adjusted_mode->crtc_hdisplay, 1382 pipe_config->bigjoiner); 1383 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 1384 drm_dbg_kms(&dev_priv->drm, 1385 "Compressed BPP/Slice Count not supported\n"); 1386 return -EINVAL; 1387 } 1388 pipe_config->dsc.compressed_bpp = min_t(u16, 1389 dsc_max_output_bpp >> 4, 1390 pipe_config->pipe_bpp); 1391 pipe_config->dsc.slice_count = dsc_dp_slice_count; 1392 } 1393 1394 /* As of today we support DSC for only RGB */ 1395 if (intel_dp->force_dsc_bpp) { 1396 if (intel_dp->force_dsc_bpp >= 8 && 1397 intel_dp->force_dsc_bpp < pipe_bpp) { 1398 drm_dbg_kms(&dev_priv->drm, 1399 "DSC BPP forced to %d", 1400 intel_dp->force_dsc_bpp); 1401 pipe_config->dsc.compressed_bpp = 1402 intel_dp->force_dsc_bpp; 1403 } else { 1404 drm_dbg_kms(&dev_priv->drm, 1405 "Invalid DSC BPP %d", 1406 intel_dp->force_dsc_bpp); 1407 } 1408 } 1409 1410 /* 1411 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 1412 * is greater than the maximum Cdclock and if slice count is even 1413 * then we need to use 2 VDSC instances. 
1414 */ 1415 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq || 1416 pipe_config->bigjoiner) { 1417 if (pipe_config->dsc.slice_count < 2) { 1418 drm_dbg_kms(&dev_priv->drm, 1419 "Cannot split stream to use 2 VDSC instances\n"); 1420 return -EINVAL; 1421 } 1422 1423 pipe_config->dsc.dsc_split = true; 1424 } 1425 1426 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 1427 if (ret < 0) { 1428 drm_dbg_kms(&dev_priv->drm, 1429 "Cannot compute valid DSC parameters for Input Bpp = %d " 1430 "Compressed BPP = %d\n", 1431 pipe_config->pipe_bpp, 1432 pipe_config->dsc.compressed_bpp); 1433 return ret; 1434 } 1435 1436 pipe_config->dsc.compression_enable = true; 1437 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 1438 "Compressed Bpp = %d Slice Count = %d\n", 1439 pipe_config->pipe_bpp, 1440 pipe_config->dsc.compressed_bpp, 1441 pipe_config->dsc.slice_count); 1442 1443 return 0; 1444 } 1445 1446 static int 1447 intel_dp_compute_link_config(struct intel_encoder *encoder, 1448 struct intel_crtc_state *pipe_config, 1449 struct drm_connector_state *conn_state) 1450 { 1451 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1452 const struct drm_display_mode *adjusted_mode = 1453 &pipe_config->hw.adjusted_mode; 1454 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1455 struct link_config_limits limits; 1456 int common_len; 1457 int ret; 1458 1459 common_len = intel_dp_common_len_rate_limit(intel_dp, 1460 intel_dp->max_link_rate); 1461 1462 /* No common link rates between source and sink */ 1463 drm_WARN_ON(encoder->base.dev, common_len <= 0); 1464 1465 limits.min_rate = intel_dp->common_rates[0]; 1466 limits.max_rate = intel_dp->common_rates[common_len - 1]; 1467 1468 limits.min_lane_count = 1; 1469 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 1470 1471 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); 1472 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); 1473 1474 if 
(intel_dp->use_max_params) { 1475 /* 1476 * Use the maximum clock and number of lanes the eDP panel 1477 * advertizes being capable of in case the initial fast 1478 * optimal params failed us. The panels are generally 1479 * designed to support only a single clock and lane 1480 * configuration, and typically on older panels these 1481 * values correspond to the native resolution of the panel. 1482 */ 1483 limits.min_lane_count = limits.max_lane_count; 1484 limits.min_rate = limits.max_rate; 1485 } 1486 1487 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 1488 1489 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 1490 "max rate %d max bpp %d pixel clock %iKHz\n", 1491 limits.max_lane_count, limits.max_rate, 1492 limits.max_bpp, adjusted_mode->crtc_clock); 1493 1494 if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay, 1495 adjusted_mode->crtc_clock)) 1496 pipe_config->bigjoiner = true; 1497 1498 /* 1499 * Optimize for slow and wide for everything, because there are some 1500 * eDP 1.3 and 1.4 panels don't work well with fast and narrow. 1501 */ 1502 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 1503 1504 /* 1505 * Pipe joiner needs compression upto display12 due to BW limitation. DG2 1506 * onwards pipe joiner can be enabled without compression. 
1507 */ 1508 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 1509 if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 && 1510 pipe_config->bigjoiner)) { 1511 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 1512 conn_state, &limits); 1513 if (ret < 0) 1514 return ret; 1515 } 1516 1517 if (pipe_config->dsc.compression_enable) { 1518 drm_dbg_kms(&i915->drm, 1519 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 1520 pipe_config->lane_count, pipe_config->port_clock, 1521 pipe_config->pipe_bpp, 1522 pipe_config->dsc.compressed_bpp); 1523 1524 drm_dbg_kms(&i915->drm, 1525 "DP link rate required %i available %i\n", 1526 intel_dp_link_required(adjusted_mode->crtc_clock, 1527 pipe_config->dsc.compressed_bpp), 1528 intel_dp_max_data_rate(pipe_config->port_clock, 1529 pipe_config->lane_count)); 1530 } else { 1531 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 1532 pipe_config->lane_count, pipe_config->port_clock, 1533 pipe_config->pipe_bpp); 1534 1535 drm_dbg_kms(&i915->drm, 1536 "DP link rate required %i available %i\n", 1537 intel_dp_link_required(adjusted_mode->crtc_clock, 1538 pipe_config->pipe_bpp), 1539 intel_dp_max_data_rate(pipe_config->port_clock, 1540 pipe_config->lane_count)); 1541 } 1542 return 0; 1543 } 1544 1545 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 1546 const struct drm_connector_state *conn_state) 1547 { 1548 const struct intel_digital_connector_state *intel_conn_state = 1549 to_intel_digital_connector_state(conn_state); 1550 const struct drm_display_mode *adjusted_mode = 1551 &crtc_state->hw.adjusted_mode; 1552 1553 /* 1554 * Our YCbCr output is always limited range. 1555 * crtc_state->limited_color_range only applies to RGB, 1556 * and it must never be set for YCbCr or we risk setting 1557 * some conflicting bits in PIPECONF which will mess up 1558 * the colors on the monitor. 
1559 */ 1560 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 1561 return false; 1562 1563 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1564 /* 1565 * See: 1566 * CEA-861-E - 5.1 Default Encoding Parameters 1567 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1568 */ 1569 return crtc_state->pipe_bpp != 18 && 1570 drm_default_rgb_quant_range(adjusted_mode) == 1571 HDMI_QUANTIZATION_RANGE_LIMITED; 1572 } else { 1573 return intel_conn_state->broadcast_rgb == 1574 INTEL_BROADCAST_RGB_LIMITED; 1575 } 1576 } 1577 1578 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 1579 enum port port) 1580 { 1581 if (IS_G4X(dev_priv)) 1582 return false; 1583 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) 1584 return false; 1585 1586 return true; 1587 } 1588 1589 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 1590 const struct drm_connector_state *conn_state, 1591 struct drm_dp_vsc_sdp *vsc) 1592 { 1593 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1594 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1595 1596 /* 1597 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1598 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 1599 * Colorimetry Format indication. 
1600 */ 1601 vsc->revision = 0x5; 1602 vsc->length = 0x13; 1603 1604 /* DP 1.4a spec, Table 2-120 */ 1605 switch (crtc_state->output_format) { 1606 case INTEL_OUTPUT_FORMAT_YCBCR444: 1607 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 1608 break; 1609 case INTEL_OUTPUT_FORMAT_YCBCR420: 1610 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 1611 break; 1612 case INTEL_OUTPUT_FORMAT_RGB: 1613 default: 1614 vsc->pixelformat = DP_PIXELFORMAT_RGB; 1615 } 1616 1617 switch (conn_state->colorspace) { 1618 case DRM_MODE_COLORIMETRY_BT709_YCC: 1619 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1620 break; 1621 case DRM_MODE_COLORIMETRY_XVYCC_601: 1622 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 1623 break; 1624 case DRM_MODE_COLORIMETRY_XVYCC_709: 1625 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 1626 break; 1627 case DRM_MODE_COLORIMETRY_SYCC_601: 1628 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 1629 break; 1630 case DRM_MODE_COLORIMETRY_OPYCC_601: 1631 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 1632 break; 1633 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 1634 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 1635 break; 1636 case DRM_MODE_COLORIMETRY_BT2020_RGB: 1637 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 1638 break; 1639 case DRM_MODE_COLORIMETRY_BT2020_YCC: 1640 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 1641 break; 1642 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 1643 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 1644 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 1645 break; 1646 default: 1647 /* 1648 * RGB->YCBCR color conversion uses the BT.709 1649 * color space. 
1650 */ 1651 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1652 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1653 else 1654 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 1655 break; 1656 } 1657 1658 vsc->bpc = crtc_state->pipe_bpp / 3; 1659 1660 /* only RGB pixelformat supports 6 bpc */ 1661 drm_WARN_ON(&dev_priv->drm, 1662 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 1663 1664 /* all YCbCr are always limited range */ 1665 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 1666 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 1667 } 1668 1669 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 1670 struct intel_crtc_state *crtc_state, 1671 const struct drm_connector_state *conn_state) 1672 { 1673 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 1674 1675 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 1676 if (crtc_state->has_psr) 1677 return; 1678 1679 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 1680 return; 1681 1682 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 1683 vsc->sdp_type = DP_SDP_VSC; 1684 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1685 &crtc_state->infoframes.vsc); 1686 } 1687 1688 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 1689 const struct intel_crtc_state *crtc_state, 1690 const struct drm_connector_state *conn_state, 1691 struct drm_dp_vsc_sdp *vsc) 1692 { 1693 vsc->sdp_type = DP_SDP_VSC; 1694 1695 if (crtc_state->has_psr2) { 1696 if (intel_dp->psr.colorimetry_support && 1697 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 1698 /* [PSR2, +Colorimetry] */ 1699 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1700 vsc); 1701 } else { 1702 /* 1703 * [PSR2, -Colorimetry] 1704 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 1705 * 3D stereo + PSR/PSR2 + Y-coordinate. 
1706 */ 1707 vsc->revision = 0x4; 1708 vsc->length = 0xe; 1709 } 1710 } else { 1711 /* 1712 * [PSR1] 1713 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1714 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 1715 * higher). 1716 */ 1717 vsc->revision = 0x2; 1718 vsc->length = 0x8; 1719 } 1720 } 1721 1722 static void 1723 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 1724 struct intel_crtc_state *crtc_state, 1725 const struct drm_connector_state *conn_state) 1726 { 1727 int ret; 1728 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1729 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 1730 1731 if (!conn_state->hdr_output_metadata) 1732 return; 1733 1734 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 1735 1736 if (ret) { 1737 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 1738 return; 1739 } 1740 1741 crtc_state->infoframes.enable |= 1742 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 1743 } 1744 1745 int 1746 intel_dp_compute_config(struct intel_encoder *encoder, 1747 struct intel_crtc_state *pipe_config, 1748 struct drm_connector_state *conn_state) 1749 { 1750 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1751 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1752 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1753 enum port port = encoder->port; 1754 struct intel_connector *intel_connector = intel_dp->attached_connector; 1755 struct intel_digital_connector_state *intel_conn_state = 1756 to_intel_digital_connector_state(conn_state); 1757 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); 1758 int ret = 0, output_bpp; 1759 1760 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 1761 pipe_config->has_pch_encoder = true; 1762 1763 pipe_config->output_format = intel_dp_output_format(&intel_connector->base, 1764 adjusted_mode); 
1765 1766 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 1767 ret = intel_panel_fitting(pipe_config, conn_state); 1768 if (ret) 1769 return ret; 1770 } 1771 1772 if (!intel_dp_port_has_audio(dev_priv, port)) 1773 pipe_config->has_audio = false; 1774 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 1775 pipe_config->has_audio = intel_dp->has_audio; 1776 else 1777 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 1778 1779 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1780 ret = intel_panel_compute_config(intel_connector, adjusted_mode); 1781 if (ret) 1782 return ret; 1783 1784 ret = intel_panel_fitting(pipe_config, conn_state); 1785 if (ret) 1786 return ret; 1787 } 1788 1789 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1790 return -EINVAL; 1791 1792 if (HAS_GMCH(dev_priv) && 1793 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 1794 return -EINVAL; 1795 1796 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 1797 return -EINVAL; 1798 1799 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 1800 return -EINVAL; 1801 1802 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 1803 if (ret < 0) 1804 return ret; 1805 1806 pipe_config->limited_color_range = 1807 intel_dp_limited_color_range(pipe_config, conn_state); 1808 1809 if (pipe_config->dsc.compression_enable) 1810 output_bpp = pipe_config->dsc.compressed_bpp; 1811 else 1812 output_bpp = intel_dp_output_bpp(pipe_config->output_format, 1813 pipe_config->pipe_bpp); 1814 1815 if (intel_dp->mso_link_count) { 1816 int n = intel_dp->mso_link_count; 1817 int overlap = intel_dp->mso_pixel_overlap; 1818 1819 pipe_config->splitter.enable = true; 1820 pipe_config->splitter.link_count = n; 1821 pipe_config->splitter.pixel_overlap = overlap; 1822 1823 drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n", 1824 n, overlap); 1825 1826 adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap; 
1827 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap; 1828 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap; 1829 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap; 1830 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap; 1831 adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap; 1832 adjusted_mode->crtc_clock /= n; 1833 } 1834 1835 intel_link_compute_m_n(output_bpp, 1836 pipe_config->lane_count, 1837 adjusted_mode->crtc_clock, 1838 pipe_config->port_clock, 1839 &pipe_config->dp_m_n, 1840 constant_n, pipe_config->fec_enable); 1841 1842 /* FIXME: abstract this better */ 1843 if (pipe_config->splitter.enable) 1844 pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count; 1845 1846 if (!HAS_DDI(dev_priv)) 1847 g4x_dp_set_clock(encoder, pipe_config); 1848 1849 intel_vrr_compute_config(pipe_config, conn_state); 1850 intel_psr_compute_config(intel_dp, pipe_config, conn_state); 1851 intel_drrs_compute_config(intel_dp, pipe_config, output_bpp, 1852 constant_n); 1853 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 1854 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 1855 1856 return 0; 1857 } 1858 1859 void intel_dp_set_link_params(struct intel_dp *intel_dp, 1860 int link_rate, int lane_count) 1861 { 1862 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); 1863 intel_dp->link_trained = false; 1864 intel_dp->link_rate = link_rate; 1865 intel_dp->lane_count = lane_count; 1866 } 1867 1868 static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp) 1869 { 1870 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 1871 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 1872 } 1873 1874 /* Enable backlight PWM and backlight PP control. 
*/ 1875 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 1876 const struct drm_connector_state *conn_state) 1877 { 1878 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 1879 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1880 1881 if (!intel_dp_is_edp(intel_dp)) 1882 return; 1883 1884 drm_dbg_kms(&i915->drm, "\n"); 1885 1886 intel_backlight_enable(crtc_state, conn_state); 1887 intel_pps_backlight_on(intel_dp); 1888 } 1889 1890 /* Disable backlight PP control and backlight PWM. */ 1891 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 1892 { 1893 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 1894 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1895 1896 if (!intel_dp_is_edp(intel_dp)) 1897 return; 1898 1899 drm_dbg_kms(&i915->drm, "\n"); 1900 1901 intel_pps_backlight_off(intel_dp); 1902 intel_backlight_disable(old_conn_state); 1903 } 1904 1905 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 1906 { 1907 /* 1908 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 1909 * be capable of signalling downstream hpd with a long pulse. 1910 * Whether or not that means D3 is safe to use is not clear, 1911 * but let's assume so until proven otherwise. 1912 * 1913 * FIXME should really check all downstream ports... 1914 */ 1915 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 1916 drm_dp_is_branch(intel_dp->dpcd) && 1917 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 1918 } 1919 1920 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 1921 const struct intel_crtc_state *crtc_state, 1922 bool enable) 1923 { 1924 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1925 int ret; 1926 1927 if (!crtc_state->dsc.compression_enable) 1928 return; 1929 1930 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 1931 enable ? 
DP_DECOMPRESSION_EN : 0); 1932 if (ret < 0) 1933 drm_dbg_kms(&i915->drm, 1934 "Failed to %s sink decompression state\n", 1935 enabledisable(enable)); 1936 } 1937 1938 static void 1939 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) 1940 { 1941 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1942 u8 oui[] = { 0x00, 0xaa, 0x01 }; 1943 u8 buf[3] = { 0 }; 1944 1945 /* 1946 * During driver init, we want to be careful and avoid changing the source OUI if it's 1947 * already set to what we want, so as to avoid clearing any state by accident 1948 */ 1949 if (careful) { 1950 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) 1951 drm_err(&i915->drm, "Failed to read source OUI\n"); 1952 1953 if (memcmp(oui, buf, sizeof(oui)) == 0) 1954 return; 1955 } 1956 1957 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) 1958 drm_err(&i915->drm, "Failed to write source OUI\n"); 1959 1960 intel_dp->last_oui_write = jiffies; 1961 } 1962 1963 void intel_dp_wait_source_oui(struct intel_dp *intel_dp) 1964 { 1965 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1966 1967 drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); 1968 wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); 1969 } 1970 1971 /* If the device supports it, try to set the power state appropriately */ 1972 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 1973 { 1974 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1975 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1976 int ret, i; 1977 1978 /* Should have a valid DPCD by this point */ 1979 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1980 return; 1981 1982 if (mode != DP_SET_POWER_D0) { 1983 if (downstream_hpd_needs_d0(intel_dp)) 1984 return; 1985 1986 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 1987 } else { 1988 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 1989 1990 lspcon_resume(dp_to_dig_port(intel_dp)); 1991 1992 /* Write 
the source OUI as early as possible */ 1993 if (intel_dp_is_edp(intel_dp)) 1994 intel_edp_init_source_oui(intel_dp, false); 1995 1996 /* 1997 * When turning on, we need to retry for 1ms to give the sink 1998 * time to wake up. 1999 */ 2000 for (i = 0; i < 3; i++) { 2001 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 2002 if (ret == 1) 2003 break; 2004 msleep(1); 2005 } 2006 2007 if (ret == 1 && lspcon->active) 2008 lspcon_wait_pcon_mode(lspcon); 2009 } 2010 2011 if (ret != 1) 2012 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", 2013 encoder->base.base.id, encoder->base.name, 2014 mode == DP_SET_POWER_D0 ? "D0" : "D3"); 2015 } 2016 2017 static bool 2018 intel_dp_get_dpcd(struct intel_dp *intel_dp); 2019 2020 /** 2021 * intel_dp_sync_state - sync the encoder state during init/resume 2022 * @encoder: intel encoder to sync 2023 * @crtc_state: state for the CRTC connected to the encoder 2024 * 2025 * Sync any state stored in the encoder wrt. HW state during driver init 2026 * and system resume. 2027 */ 2028 void intel_dp_sync_state(struct intel_encoder *encoder, 2029 const struct intel_crtc_state *crtc_state) 2030 { 2031 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2032 2033 if (!crtc_state) 2034 return; 2035 2036 /* 2037 * Don't clobber DPCD if it's been already read out during output 2038 * setup (eDP) or detect. 2039 */ 2040 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2041 intel_dp_get_dpcd(intel_dp); 2042 2043 intel_dp_reset_max_link_params(intel_dp); 2044 } 2045 2046 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, 2047 struct intel_crtc_state *crtc_state) 2048 { 2049 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2050 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2051 2052 /* 2053 * If BIOS has set an unsupported or non-standard link rate for some 2054 * reason force an encoder recompute and full modeset. 
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	if (CAN_PSR(intel_dp)) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}

/* Cache the PCON's DSC encoder capability DPCD registers */
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Clear the cached register set to avoid using stale values */

	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
			     intel_dp->pcon_dsc_dpcd,
			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
			DP_PCON_DSC_ENCODER);

	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}

/* Return the highest FRL bandwidth in Gbps set in @frl_bw_mask, or 0 if none */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}
	return 0;
}

/* Map a maximum FRL bandwidth in Gbps to its DPCD FRL bandwidth mask bit */
static int intel_dp_pcon_set_frl_mask(int max_frl)
{
	switch (max_frl) {
	case 48:
		return DP_PCON_FRL_BW_MASK_48GBPS;
	case 40:
		return DP_PCON_FRL_BW_MASK_40GBPS;
	case 32:
		return DP_PCON_FRL_BW_MASK_32GBPS;
	case 24:
		return DP_PCON_FRL_BW_MASK_24GBPS;
	case 18:
		return DP_PCON_FRL_BW_MASK_18GBPS;
	case 9:
		return DP_PCON_FRL_BW_MASK_9GBPS;
	}

	return 0;
}

/*
 * Max FRL rate (lanes * rate per lane) the HDMI sink supports, per its
 * EDID-derived display_info; capped by the sink's DSC FRL limits when
 * HDMI2.1 DSC (v_1p2) is advertised.
 */
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int max_frl_rate;
	int max_lanes, rate_per_lane;
	int max_dsc_lanes, dsc_rate_per_lane;

	max_lanes = connector->display_info.hdmi.max_lanes;
	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
	max_frl_rate = max_lanes * rate_per_lane;

	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
		if (max_dsc_lanes && dsc_rate_per_lane)
			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
	}

	return max_frl_rate;
}

/*
 * Run the PCON FRL training sequence: reset FRL config, wait for FRL-ready,
 * configure the target bandwidth/mask, enable FRL, then wait for the HDMI
 * link to come up and verify it is in FRL mode. Returns 0 on success, a
 * negative error code on failure/timeout.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
	if (ret < 0)
		return ret;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	/* Train at the lower of what the PCON and the sink can do */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Verify HDMI Link configuration shows FRL Mode */
	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
	    DP_PCON_HDMI_MODE_FRL) {
		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
		return -EINVAL;
	}
	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);

	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}

/* True when a branch device drives an HDMI sink that advertises FRL support */
static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
{
	if (drm_dp_is_branch(intel_dp->dpcd) &&
	    intel_dp->has_hdmi_sink &&
	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
		return true;

	return false;
}

void
intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * Always go for FRL training if:
	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
	 * -sink is HDMI2.1
	 */
	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
	    intel_dp->frl.is_trained)
		return;

	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
		int ret, mode;

		/* FRL failed: fall back to TMDS and verify the PCON accepted it */
		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
		ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);

		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
	} else {
		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
	}
}

/* DSC slice height for the PCON encoder, derived from the active vdisplay */
static int
intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
{
	int vactive = crtc_state->hw.adjusted_mode.vdisplay;

	return intel_hdmi_dsc_get_slice_height(vactive);
}

/*
 * Number of DSC slices for the PCON encoder, honoring both the PCON's and
 * the HDMI sink's slice count/width limits.
 */
static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);

	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
					     pcon_max_slice_width,
					     hdmi_max_slices, hdmi_throughput);
}

static int
intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  int num_slices, int slice_width)
{
	/*
	 * DSC bits-per-pixel for the PCON encoder, bounded by the HDMI
	 * sink's fractional-bpp support and chunk size limits.
	 */
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;
	int output_format = crtc_state->output_format;
	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
	int hdmi_max_chunk_bytes =
		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;

	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
				      num_slices, output_format, hdmi_all_bpp,
				      hdmi_max_chunk_bytes);
}

/*
 * Compute PPS override parameters (slice height/width, bpp) for an HDMI2.1
 * DSC-capable sink behind a PCON and program them over AUX. Bails out
 * silently if the sink or PCON lacks DSC 1.2 support or any parameter
 * computes to zero.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack little-endian 16-bit height/width and 10-bit bpp */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}

/*
 * Configure a DP branch device's protocol converter controls: HDMI vs DVI
 * output mode, YCbCr 4:4:4 -> 4:2:0 conversion, and RGB -> YCbCr conversion
 * colorimetry. Requires DPCD rev >= 1.3 and a branch device.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
			    enabledisable(intel_dp->has_hdmi_sink));

	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
			    enabledisable(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;
	if (intel_dp->dfp.rgb_to_ycbcr) {
		bool bt2020, bt709;

		/*
		 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
		 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
		 *
		 */
		tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;

		bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								   intel_dp->downstream_ports,
								   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
		bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								  intel_dp->downstream_ports,
								  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
		switch (crtc_state->infoframes.vsc.colorimetry) {
		case DP_COLORIMETRY_BT2020_RGB:
		case DP_COLORIMETRY_BT2020_YCC:
			if (bt2020)
				tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
			break;
		case DP_COLORIMETRY_BT709_YCC:
		case DP_COLORIMETRY_XVYCC_709:
			if (bt709)
				tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
			break;
		default:
			break;
		}
	}

	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
			    enabledisable(tmp));
}


/* Read DPRX feature list and report whether VSC SDP extended colorimetry is supported */
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

/* Cache the sink's DSC capability DPCD registers and FEC capability */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}

/*
 * Rewrite a panel mode's horizontal timings and clock to span all MSO
 * (Multi-SST Operation) segment links: each timing value is scaled by the
 * link count after subtracting the per-segment pixel overlap.
 */
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
				     struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int n = intel_dp->mso_link_count;
	int overlap = intel_dp->mso_pixel_overlap;

	if (!mode || !n)
		return;

	mode->hdisplay = (mode->hdisplay - overlap) * n;
	mode->hsync_start = (mode->hsync_start - overlap) * n;
	mode->hsync_end = (mode->hsync_end - overlap) * n;
	mode->htotal = (mode->htotal - overlap) * n;
	mode->clock *= n;

	drm_mode_set_name(mode);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] using generated MSO mode: ",
		    connector->base.base.id, connector->base.name);
	drm_mode_debug_printmodeline(mode);
}

/*
 * Read the sink's eDP MSO link capabilities and record the validated link
 * count and pixel overlap in intel_dp. Invalid or unsupported configurations
 * degrade to mso = 0 (plain SST).
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}

static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			/* A zero entry terminates the rate table */
			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_max_link_params(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (DISPLAY_VER(dev_priv) >= 10)
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}

/* Whether the attached sink reports a usable SINK_COUNT */
static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display.
	 * Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

/* MST is usable only when the modparam, the source and the sink all allow it */
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp_mst_source_support(intel_dp) &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

/* Decide MST vs SST for this port and program the topology manager accordingly */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp_mst_source_support(intel_dp))
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

/* Read the full ESI vector; true only if all DP_DPRX_ESI_LEN bytes were read */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Serialize a drm_dp_vsc_sdp into the raw SDP wire format. Returns the
 * packed length (sizeof(struct dp_sdp)) or -ENOSPC if @size is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80;  /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

/*
 * Serialize an HDR Dynamic Range and Mastering infoframe into an SDP.
 * Returns the meaningful payload size (header + 2 CTA header bytes +
 * HDMI_DRM_INFOFRAME_SIZE) or -ENOSPC on any size mismatch.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

/* Pack the requested SDP type from crtc_state and hand it to the port's DIP writer */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW |
			 VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (!crtc_state->has_psr)
		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	if (!enable)
		return;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!crtc_state->has_psr)
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

/*
 * Deserialize a raw VSC SDP buffer into @vsc, validating the header and
 * only accepting the known revision/length combinations. Returns 0 on
 * success, -EINVAL on any malformed field.
 */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

/*
 * Deserialize an HDR static metadata infoframe SDP, validating every header
 * byte against the fixed values written by the pack routine above before
 * unpacking the payload. Returns 0 on success, -EINVAL on mismatch.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

/* Read the VSC SDP back from the hardware DIP and unpack it into @vsc */
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

/* Read the HDR metadata infoframe SDP back from the hardware DIP and unpack it */
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

/*
 * Handle the DP compliance link-training autotest request: read and validate
 * the requested lane count/link rate, cache them for the test handler, and
 * return DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

static u8
intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* only the color-ramp pattern is supported by this driver */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* only RGB, VESA range, 6/8 bpc streams are accepted */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

/*
 * DP CTS EDID autotest: report either the checksum of the last EDID block
 * read (success) or request failsafe mode (failed/corrupt EDID read).
 */
static u8
intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}

/*
 * Program the DDI compliance registers with the PHY test pattern the sink
 * requested (cached in compliance.test_data.phytest).
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}

/*
 * Tear down the transcoder/pipe/DP TP around a PHY compliance test so the
 * new signal levels and test pattern can be programmed.
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}

/* Re-enable the transcoder/pipe/DP TP after a PHY compliance test. */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}

/*
 * Service a sink PHY test request: apply the requested vswing/pre-emphasis,
 * program the test pattern around a DDI disable/enable cycle, and echo the
 * training levels and pattern back to the sink via DPCD.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		DRM_DEBUG_KMS("failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);

	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
			  intel_dp->train_set, crtc_state->lane_count);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}

/*
 * DP CTS PHY autotest: cache the PHY test pattern parameters requested by
 * the sink; the actual pattern is applied later from the hotplug path.
 */
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

/*
 * Read the sink's DP_TEST_REQUEST, dispatch to the matching autotest
 * handler, and write the ACK/NAK response back to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	/* remember the accepted test type so the modeset path can act on it */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}

/* Let the MST core service an ESI HPD irq; also flag CP IRQs for HDCP. */
static void
intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
{
	drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);

	if (esi[1] & DP_CP_IRQ) {
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
		*handled = true;
	}
}

/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	for (;;) {
		/*
		 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
		 * pass in "esi+10" to drm_dp_channel_eq_ok(), which
		 * takes a 6-byte array. So we actually need 16 bytes
		 * here.
		 *
		 * Somebody who knows what the limits actually are
		 * should check this, but for now this is at least
		 * harmless and avoids a valid compiler warning about
		 * using more of the array than we have allocated.
		 */
		u8 esi[DP_DPRX_ESI_LEN+2] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		intel_dp_mst_hpd_irq(intel_dp, esi, &handled);

		if (!handled)
			break;

		/* ack the serviced ESI bits; retry the DPCD write a few times */
		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}

/*
 * React to a PCON HDMI link-status change: if FRL was trained but the link
 * is no longer active, disable the HDMI link and restart FRL training (or
 * fall back to TMDS).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}

/*
 * Decide whether the main link needs retraining based on the DPRX link
 * status, skipping the check while PSR owns the link.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

/*
 * Is @conn_state driven by this DP port, either as the SST encoder or one
 * of its per-pipe MST fake encoders?
 */
static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

/*
 * Collect (and lock) the active CRTCs driven by this port that need link
 * retraining, returning their mask in @crtc_mask. Returns 0 or a modeset
 * lock error (-EDEADLK triggers backoff in the caller).
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* re-check: the link may have recovered while we were locking */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}

/* A port is "connected" for retrain purposes if SST-connected or in MST mode. */
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
		intel_dp->is_mst;
}

/*
 * Retrain the DP link on all affected CRTCs, suppressing FIFO underrun
 * reporting for the duration. Returns 0 or a modeset lock error.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * Collect (and lock) the active CRTCs driven by this port for a PHY test,
 * returning their mask in @crtc_mask. Returns 0 or a modeset lock error.
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* skip CRTCs with a commit still in flight */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/* Run the pending PHY compliance test under the required modeset locks. */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}

/*
 * Entry point for the deferred PHY compliance test: retry the locked test
 * with modeset backoff until it no longer deadlocks.
 */
void intel_dp_phy_test(struct intel_encoder *encoder)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_do_phy_test(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);
}

/* Service the sink's device service IRQ vector (autotest, CP, sink irqs). */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* ack the serviced bits */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/* Service the sink's link service IRQ vector (PCON HDMI link status). */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}

/*
 * According to DP spec
 * 5.1.2:
 * 1. Read DPCD
 * 2.
 *	Configure link according to Receiver Capabilities
 *	3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *	4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Ack/dispatch any pending sink device/link service IRQ vectors */
	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}

/*
 * Probe the DPCD of a non-eDP sink and classify the connector status,
 * taking branch devices (DFPs) into account.
 *
 * XXX this is probably wrong for multiple downstream ports
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: downstream port type lives in DOWNSTREAM_PORT_0 */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse type in DOWNSTREAMPORT_PRESENT */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/* eDP panels are integrated and cannot be unplugged: always connected. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* Query live state via the port hook with display core power held */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

/*
 * Return a freshly allocated EDID for the sink, either duplicated from the
 * connector's cached copy (eDP) or read over DDC. NULL if the cached EDID
 * was marked invalid. Caller owns the returned EDID.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

/*
 * Cache the downstream facing port (branch device) limits from DPCD/EDID:
 * max bpc, max dotclock, TMDS clock range and PCON FRL bandwidth.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id,
		    connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}

/*
 * Work out how (and whether) YCbCr 4:2:0 output can be supported on this
 * connector: native passthrough, sink-side 4:4:4->4:2:0 conversion, and/or
 * PCON RGB->YCbCr conversion. Updates intel_dp->dfp and ycbcr_420_allowed.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severely corrupted. SNB+ is fine.
	 */
	if (IS_IRONLAKE(i915))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);

	if (DISPLAY_VER(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
			/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 ||
				ycbcr_420_passthrough;
		}
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(intel_dp->dfp.rgb_to_ycbcr),
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}

/*
 * (Re-)read the sink's EDID and refresh all state derived from it:
 * DFP limits, 4:2:0 capabilities, HDMI-sink and audio flags, CEC.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	/* Drop any previously cached EDID-derived state first */
	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}

/* Free the cached detect-time EDID and reset all state derived from it. */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}

/*
 * drm_connector_helper_funcs.detect_ctx implementation: full connector
 * probe (DPCD, MST, link retrain, EDID). Runs with connection_mutex held.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct
drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget stale compliance/DSC state from the removed sink */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (DISPLAY_VER(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		intel_dp_reset_max_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}

/*
 * drm_connector_funcs.force implementation: refresh the cached EDID for a
 * connector that userspace has forced connected, taking an AUX power ref.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}

/*
 * drm_connector_helper_funcs.get_modes implementation: modes from the
 * detect-time EDID, the eDP fixed mode, or a DFP fallback mode.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;
	int num_modes = 0;

	edid = intel_connector->detect_edid;
	if (edid) {
		num_modes = intel_connector_update_modes(connector, edid);

		if (intel_vrr_is_capable(connector))
			drm_connector_set_vrr_capable_property(connector,
							       true);
	}

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	if
(num_modes)
		return num_modes;

	/* No EDID modes: fall back to a mode fixed by a branch device, if any */
	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}

/*
 * drm_connector_funcs.late_register implementation: register the AUX
 * channel, CEC adapter and (if present) LSPCON once sysfs exists.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_object_attach_property(&connector->base,
						   connector->dev->mode_config.hdr_output_metadata_property,
						   0);
	}

	return ret;
}

/* drm_connector_funcs.early_unregister: tear down CEC and the AUX channel. */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

/* Encoder teardown: stop MST, sync VDD off and release the AUX resources. */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_aux_fini(intel_dp);
}

/* Suspend hook: make sure panel VDD is off before the system sleeps. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);
}

/* Shutdown hook: wait out the panel power cycle so a reboot starts clean. */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}

/*
 * Add every connector of the given tile group (and its planes) to the
 * atomic state and flag a modeset, so tiled displays update together.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

/*
 * Pull every enabled crtc whose transcoder is in the given mask into the
 * atomic state and flag a modeset on it (used for port-synced outputs).
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Each transcoder should be matched at most once */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

/*
 * If the connector's old crtc was part of a port sync group, add the whole
 * group (master + slaves) to the atomic state for a coordinated modeset.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

/*
 * drm_connector_helper_funcs.atomic_check implementation: on top of the
 * generic digital connector check, pull in tile-group siblings and
 * port-synced crtcs when this connector needs a modeset.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

/*
 * Out-of-band hotplug (e.g. from the Type-C subsystem): flag the pin and
 * kick the hotplug work rather than handling it inline.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);

	spin_lock_irq(&i915->irq_lock);
	i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
	spin_unlock_irq(&i915->irq_lock);
	queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/*
 * HPD IRQ entry point for this port. Long pulses defer to a full detect
 * (IRQ_NONE tells the caller to schedule one); short pulses are handled
 * inline via MST sideband or intel_dp_short_pulse().
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ?
connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}

/*
 * eDP-specific connector init: bring up the panel power sequencer, cache
 * DPCD and EDID, establish the fixed (and optional downclock/DRRS) mode,
 * and set up the backlight. Returns %false if the panel is a ghost or
 * conflicts with an already registered LVDS panel.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_pps_init(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
		} else {
			/* EDID read but unparseable: cache the error marker */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_drrs_init(intel_connector, fixed_mode);

	/* MSO requires information from the EDID */
	intel_edp_mso_init(intel_dp);

	/* multiply the mode clock and horizontal timings for MSO */
	intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
	intel_edp_mso_mode_fixup(intel_connector, downclock_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
		intel_connector->panel.backlight.power = intel_pps_backlight_power;
	intel_backlight_setup(intel_connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}

/*
 * Deferred work scheduled after link training falls back to lower params:
 * mark the link BAD and nudge userspace to do a fresh modeset.
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

/*
 * Create and wire up the DP/eDP connector for a digital port: link rate
 * tables, AUX channel, MST, eDP panel init, properties, HDCP and PSR.
 * Returns %false (with the connector cleaned up) on failure.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_max_link_params(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

/* Suspend the MST topology managers on all MST-capable DDI encoders. */
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

/*
 * Resume the MST topology managers; if a topology failed to come back,
 * drop that port out of MST mode.
 */
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_mst_source_support(intel_dp))
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}