/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides more link rates.
 * Below only the fixed rates are provided; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
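/*
 * Worked example for the helper above (illustrative numbers, not from the
 * spec): for rates = { 162000, 270000, 540000 } and max_rate = 270000, the
 * scan from the top of the (ascending) array first finds 270000 <= max_rate
 * at i = 1, so the returned length is len - i = 2.
 */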
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);

	if (lttpr_max)
		sink_max = min(sink_max, lttpr_max);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
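/*
 * Worked example for the two helpers above (illustrative numbers): a
 * 1920x1080@60 mode with a 148500 kHz pixel clock at 24 bpp needs
 * intel_dp_link_required(148500, 24) = 445500 kB/s, while an HBR2 link
 * (540000 kHz LS_Clk) on 4 lanes provides
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so the mode fits.
 */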
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else if (IS_JSL_EHL(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
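/*
 * Link training fallback, summarized for the helpers below: when training
 * fails at the current parameters, we first step down through the common
 * link rates at the same lane count, and only once the lowest rate is
 * reached do we halve the lane count at the maximum common rate. For eDP
 * the fixed panel mode must still fit the reduced link bandwidth;
 * otherwise training is simply retried with unchanged parameters.
 */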
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
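/*
 * Example of the FEC overhead math above (illustrative): for a 148500 kHz
 * mode clock, intel_dp_mode_to_fec_clock() returns
 * 148500 * 1000000 / 972261 ~= 152736 kHz, i.e. FEC adds roughly 2.85%
 * (1/0.972261) on top of the uncompressed rate.
 */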
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format so multiply by 16; for Gen 11 the
	 * fractional part is 0.
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

static enum intel_output_format
intel_dp_output_format(struct drm_connector *connector,
		       const struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	const struct drm_display_info *info = &connector->display_info;

	if (!connector->ycbcr_420_allowed ||
	    !drm_mode_is_420_only(info, mode))
		return INTEL_OUTPUT_FORMAT_RGB;

	if (intel_dp->dfp.ycbcr_444_to_420)
		return INTEL_OUTPUT_FORMAT_YCBCR444;
	else
		return INTEL_OUTPUT_FORMAT_YCBCR420;
}

int intel_dp_min_bpp(enum intel_output_format output_format)
{
	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}
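/*
 * For reference (derived from the helper above, not new policy): RGB can be
 * dithered down to 6 bpc, giving a minimum of 6 * 3 = 18 bpp, while the
 * YCbCr formats need the full 8 bpc, i.e. 8 * 3 = 24 bpp.
 */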
static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
{
	/*
	 * The bpp value is assumed to be for RGB format. For YCbCr 4:2:0
	 * output the effective number of bits per pixel is half that of an
	 * RGB pixel.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

static int
intel_dp_mode_min_output_bpp(struct drm_connector *connector,
			     const struct drm_display_mode *mode)
{
	enum intel_output_format output_format =
		intel_dp_output_format(connector, mode);

	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	int tmds_clock;

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	tmds_clock = target_clock;
	if (drm_mode_is_420_only(info, mode))
		tmds_clock /= 2;

	if (intel_dp->dfp.min_tmds_clock &&
	    tmds_clock < intel_dp->dfp.min_tmds_clock)
		return MODE_CLOCK_LOW;
	if (intel_dp->dfp.max_tmds_clock &&
	    tmds_clock > intel_dp->dfp.max_tmds_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;
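	/*
	 * A mode that exceeds the plain link bandwidth may still be usable
	 * with DSC: compute the maximum compressed bpp and slice count the
	 * sink supports, so the bandwidth check below can take compression
	 * into account (both values staying 0 means no DSC fallback).
	 */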
	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	status = intel_dp_mode_valid_downstream(intel_connector,
						mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
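/*
 * Example of the AUX packing above (illustrative): the bytes are packed
 * big-endian within the 32-bit data registers, so intel_dp_pack_aux() on
 * { 0x12, 0x34 } yields 0x12340000, and intel_dp_unpack_aux(0x12340000,
 * dst, 2) recovers the same two bytes.
 */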
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/*
	 * Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
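/*
 * Note on the search below: VLV/CHV have two PPS instances, tied to pipe A
 * and pipe B, so the free ones are tracked in a bitmask and the lowest
 * free pipe is picked with ffs().
 */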
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}
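/*
 * Wait for the AUX transaction kicked off by the caller to complete: the
 * hardware clears DP_AUX_CH_CTL_SEND_BUSY when it is done, and we give up
 * after 10 ms.
 */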
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
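/*
 * Example for the dividers above (illustrative numbers): with a 24000 kHz
 * rawclk, DIV_ROUND_CLOSEST(24000, 2000) = 12, which gives the AUX channel
 * the desired 2 MHz clock. The fixed 63/72 values for non-ULT HSW come
 * from a hardware workaround rather than this formula.
 */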
static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);
	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}
	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
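/*
 * On TGL+ the AUX channels for the Type-C capable ports are named
 * USBC1-USBC6 instead of continuing the legacy A-F scheme, hence the
 * separate register lookups below.
 */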
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:
	case AUX_CH_USBC6:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	if (INTEL_GEN(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (INTEL_GEN(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}
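/*
 * intel_dp_set_clock() below only applies to the pre-DDI platforms whose
 * fixed DPLL dividers were tabulated at the top of this file
 * (g4x/pch/vlv/chv); it copies the divider setup matching the port clock
 * into the CRTC state.
 */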
intel_crtc_state *pipe_config) 1924 { 1925 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1926 const struct dp_link_dpll *divisor = NULL; 1927 int i, count = 0; 1928 1929 if (IS_G4X(dev_priv)) { 1930 divisor = g4x_dpll; 1931 count = ARRAY_SIZE(g4x_dpll); 1932 } else if (HAS_PCH_SPLIT(dev_priv)) { 1933 divisor = pch_dpll; 1934 count = ARRAY_SIZE(pch_dpll); 1935 } else if (IS_CHERRYVIEW(dev_priv)) { 1936 divisor = chv_dpll; 1937 count = ARRAY_SIZE(chv_dpll); 1938 } else if (IS_VALLEYVIEW(dev_priv)) { 1939 divisor = vlv_dpll; 1940 count = ARRAY_SIZE(vlv_dpll); 1941 } 1942 1943 if (divisor && count) { 1944 for (i = 0; i < count; i++) { 1945 if (pipe_config->port_clock == divisor[i].clock) { 1946 pipe_config->dpll = divisor[i].dpll; 1947 pipe_config->clock_set = true; 1948 break; 1949 } 1950 } 1951 } 1952 } 1953 1954 static void snprintf_int_array(char *str, size_t len, 1955 const int *array, int nelem) 1956 { 1957 int i; 1958 1959 str[0] = '\0'; 1960 1961 for (i = 0; i < nelem; i++) { 1962 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); 1963 if (r >= len) 1964 return; 1965 str += r; 1966 len -= r; 1967 } 1968 } 1969 1970 static void intel_dp_print_rates(struct intel_dp *intel_dp) 1971 { 1972 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1973 char str[128]; /* FIXME: too big for stack? */ 1974 1975 if (!drm_debug_enabled(DRM_UT_KMS)) 1976 return; 1977 1978 snprintf_int_array(str, sizeof(str), 1979 intel_dp->source_rates, intel_dp->num_source_rates); 1980 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1981 1982 snprintf_int_array(str, sizeof(str), 1983 intel_dp->sink_rates, intel_dp->num_sink_rates); 1984 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1985 1986 snprintf_int_array(str, sizeof(str), 1987 intel_dp->common_rates, intel_dp->num_common_rates); 1988 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1989 } 1990 1991 int 1992 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1993 { 1994 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1995 int len; 1996 1997 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1998 if (drm_WARN_ON(&i915->drm, len <= 0)) 1999 return 162000; 2000 2001 return intel_dp->common_rates[len - 1]; 2002 } 2003 2004 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 2005 { 2006 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2007 int i = intel_dp_rate_index(intel_dp->sink_rates, 2008 intel_dp->num_sink_rates, rate); 2009 2010 if (drm_WARN_ON(&i915->drm, i < 0)) 2011 i = 0; 2012 2013 return i; 2014 } 2015 2016 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 2017 u8 *link_bw, u8 *rate_select) 2018 { 2019 /* eDP 1.4 rate select method. 
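 * For illustration (the sink_rates table here is hypothetical): with
 * sink_rates = { 162000, 216000, 270000, 540000 } and port_clock = 270000,
 * the rate select path below writes *link_bw = 0 and *rate_select = 2 (the
 * index into sink_rates), while the classic path writes *link_bw =
 * drm_dp_link_rate_to_bw_code(270000) = 0x0a (DP_LINK_BW_2_7) and
 * *rate_select = 0.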
*/ 2020 if (intel_dp->use_rate_select) { 2021 *link_bw = 0; 2022 *rate_select = 2023 intel_dp_rate_select(intel_dp, port_clock); 2024 } else { 2025 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 2026 *rate_select = 0; 2027 } 2028 } 2029 2030 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 2031 const struct intel_crtc_state *pipe_config) 2032 { 2033 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2034 2035 /* On TGL, FEC is supported on all Pipes */ 2036 if (INTEL_GEN(dev_priv) >= 12) 2037 return true; 2038 2039 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) 2040 return true; 2041 2042 return false; 2043 } 2044 2045 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 2046 const struct intel_crtc_state *pipe_config) 2047 { 2048 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 2049 drm_dp_sink_supports_fec(intel_dp->fec_capable); 2050 } 2051 2052 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 2053 const struct intel_crtc_state *crtc_state) 2054 { 2055 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 2056 2057 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) 2058 return false; 2059 2060 return intel_dsc_source_support(encoder, crtc_state) && 2061 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 2062 } 2063 2064 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, 2065 const struct intel_crtc_state *crtc_state) 2066 { 2067 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 2068 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 2069 intel_dp->dfp.ycbcr_444_to_420); 2070 } 2071 2072 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, 2073 const struct intel_crtc_state *crtc_state, int bpc) 2074 { 2075 int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; 2076 2077 if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) 2078 clock /= 2; 2079 2080 return clock; 2081 } 2082 2083 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, 2084 const struct intel_crtc_state *crtc_state, int bpc) 2085 { 2086 int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc); 2087 2088 if (intel_dp->dfp.min_tmds_clock && 2089 tmds_clock < intel_dp->dfp.min_tmds_clock) 2090 return false; 2091 2092 if (intel_dp->dfp.max_tmds_clock && 2093 tmds_clock > intel_dp->dfp.max_tmds_clock) 2094 return false; 2095 2096 return true; 2097 } 2098 2099 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, 2100 const struct intel_crtc_state *crtc_state, 2101 int bpc) 2102 { 2103 2104 return intel_hdmi_deep_color_possible(crtc_state, bpc, 2105 intel_dp->has_hdmi_sink, 2106 intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && 2107 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); 2108 } 2109 2110 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 2111 const struct intel_crtc_state *crtc_state) 2112 { 2113 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2114 struct intel_connector *intel_connector = intel_dp->attached_connector; 2115 int bpp, bpc; 2116 2117 bpc = crtc_state->pipe_bpp / 3; 2118 2119 if (intel_dp->dfp.max_bpc) 2120 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 2121 2122 if (intel_dp->dfp.min_tmds_clock) { 2123 for (; bpc >= 10; bpc -= 2) { 2124 if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) 2125 break; 2126 } 2127 } 2128 2129 bpp = bpc * 3; 2130 if (intel_dp_is_edp(intel_dp)) { 2131 /* Get bpp from VBT only for panels that don't have bpp in EDID */ 2132 if
(intel_connector->base.display_info.bpc == 0 && 2133 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 2134 drm_dbg_kms(&dev_priv->drm, 2135 "clamping bpp for eDP panel to BIOS-provided %i\n", 2136 dev_priv->vbt.edp.bpp); 2137 bpp = dev_priv->vbt.edp.bpp; 2138 } 2139 } 2140 2141 return bpp; 2142 } 2143 2144 /* Adjust link config limits based on compliance test requests. */ 2145 void 2146 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 2147 struct intel_crtc_state *pipe_config, 2148 struct link_config_limits *limits) 2149 { 2150 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2151 2152 /* For DP Compliance we override the computed bpp for the pipe */ 2153 if (intel_dp->compliance.test_data.bpc != 0) { 2154 int bpp = 3 * intel_dp->compliance.test_data.bpc; 2155 2156 limits->min_bpp = limits->max_bpp = bpp; 2157 pipe_config->dither_force_disable = bpp == 6 * 3; 2158 2159 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 2160 } 2161 2162 /* Use values requested by Compliance Test Request */ 2163 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 2164 int index; 2165 2166 /* Validate the compliance test data since max values 2167 * might have changed due to link train fallback. 2168 */ 2169 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 2170 intel_dp->compliance.test_lane_count)) { 2171 index = intel_dp_rate_index(intel_dp->common_rates, 2172 intel_dp->num_common_rates, 2173 intel_dp->compliance.test_link_rate); 2174 if (index >= 0) 2175 limits->min_clock = limits->max_clock = index; 2176 limits->min_lane_count = limits->max_lane_count = 2177 intel_dp->compliance.test_lane_count; 2178 } 2179 } 2180 } 2181 2182 /* Optimize link config in order: max bpp, min clock, min lanes */ 2183 static int 2184 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2185 struct intel_crtc_state *pipe_config, 2186 const struct link_config_limits *limits) 2187 { 2188 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2189 int bpp, clock, lane_count; 2190 int mode_rate, link_clock, link_avail; 2191 2192 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2193 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 2194 2195 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2196 output_bpp); 2197 2198 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2199 for (lane_count = limits->min_lane_count; 2200 lane_count <= limits->max_lane_count; 2201 lane_count <<= 1) { 2202 link_clock = intel_dp->common_rates[clock]; 2203 link_avail = intel_dp_max_data_rate(link_clock, 2204 lane_count); 2205 2206 if (mode_rate <= link_avail) { 2207 pipe_config->lane_count = lane_count; 2208 pipe_config->pipe_bpp = bpp; 2209 pipe_config->port_clock = link_clock; 2210 2211 return 0; 2212 } 2213 } 2214 } 2215 } 2216 2217 return -EINVAL; 2218 } 2219 2220 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2221 { 2222 int i, num_bpc; 2223 u8 dsc_bpc[3] = {0}; 2224 2225 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2226 dsc_bpc); 2227 for (i = 0; i < num_bpc; i++) { 2228 if (dsc_max_bpc >= dsc_bpc[i]) 2229 return dsc_bpc[i] * 3; 2230 } 2231 2232 return 0; 2233 } 2234 2235 #define DSC_SUPPORTED_VERSION_MIN 1 2236 2237 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2238 struct intel_crtc_state *crtc_state) 2239 { 2240 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2241 struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); 2242 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2243 u8 line_buf_depth; 2244 int ret; 2245 2246 ret = intel_dsc_compute_params(encoder, crtc_state); 2247 if (ret) 2248 return ret; 2249 2250 /* 2251 * Slice Height of 8 works for all currently available panels. So start 2252 * with that if pic_height is an integral multiple of 8. Eventually add 2253 * logic to try multiple slice heights. 2254 */ 2255 if (vdsc_cfg->pic_height % 8 == 0) 2256 vdsc_cfg->slice_height = 8; 2257 else if (vdsc_cfg->pic_height % 4 == 0) 2258 vdsc_cfg->slice_height = 4; 2259 else 2260 vdsc_cfg->slice_height = 2; 2261 2262 vdsc_cfg->dsc_version_major = 2263 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2264 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2265 vdsc_cfg->dsc_version_minor = 2266 min(DSC_SUPPORTED_VERSION_MIN, 2267 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2268 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2269 2270 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2271 DP_DSC_RGB; 2272 2273 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2274 if (!line_buf_depth) { 2275 drm_dbg_kms(&i915->drm, 2276 "DSC Sink Line Buffer Depth invalid\n"); 2277 return -EINVAL; 2278 } 2279 2280 if (vdsc_cfg->dsc_version_minor == 2) 2281 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2282 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2283 else 2284 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2285 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2286 2287 vdsc_cfg->block_pred_enable = 2288 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2289 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2290 2291 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2292 } 2293 2294 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2295 struct intel_crtc_state *pipe_config, 2296 struct drm_connector_state *conn_state, 2297 struct link_config_limits *limits) 2298 { 2299 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2300 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2301 const struct drm_display_mode *adjusted_mode = 2302 &pipe_config->hw.adjusted_mode; 2303 u8 dsc_max_bpc; 2304 int pipe_bpp; 2305 int ret; 2306 2307 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2308 intel_dp_supports_fec(intel_dp, pipe_config); 2309 2310 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2311 return -EINVAL; 2312 2313 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2314 if (INTEL_GEN(dev_priv) >= 12) 2315 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2316 else 2317 dsc_max_bpc = min_t(u8, 10, 2318 conn_state->max_requested_bpc); 2319 2320 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2321 2322 /* Min Input BPC for ICL+ is 8 */ 2323 if (pipe_bpp < 8 * 3) { 2324 drm_dbg_kms(&dev_priv->drm, 2325 "No DSC support for less than 8bpc\n"); 2326 return -EINVAL; 2327 } 2328 2329 /* 2330 * For now enable DSC for max bpp, max link rate, max lane count. 2331 * Optimize this later for the minimum possible link rate/lane count 2332 * with DSC enabled for the requested mode. 
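 *
 * To make the units concrete (the mode and limits are illustrative, not
 * from any particular panel): for a 3840x2160@60 mode on 4 lanes at
 * port_clock = 540000, both drm_edp_dsc_sink_output_bpp() and
 * intel_dp_dsc_get_output_bpp() report the maximum compressed bpp in
 * 1/16 bpp units, which is why the value is shifted right by 4 below
 * before being clamped against pipe_bpp.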
2333 */ 2334 pipe_config->pipe_bpp = pipe_bpp; 2335 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2336 pipe_config->lane_count = limits->max_lane_count; 2337 2338 if (intel_dp_is_edp(intel_dp)) { 2339 pipe_config->dsc.compressed_bpp = 2340 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2341 pipe_config->pipe_bpp); 2342 pipe_config->dsc.slice_count = 2343 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2344 true); 2345 } else { 2346 u16 dsc_max_output_bpp; 2347 u8 dsc_dp_slice_count; 2348 2349 dsc_max_output_bpp = 2350 intel_dp_dsc_get_output_bpp(dev_priv, 2351 pipe_config->port_clock, 2352 pipe_config->lane_count, 2353 adjusted_mode->crtc_clock, 2354 adjusted_mode->crtc_hdisplay); 2355 dsc_dp_slice_count = 2356 intel_dp_dsc_get_slice_count(intel_dp, 2357 adjusted_mode->crtc_clock, 2358 adjusted_mode->crtc_hdisplay); 2359 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2360 drm_dbg_kms(&dev_priv->drm, 2361 "Compressed BPP/Slice Count not supported\n"); 2362 return -EINVAL; 2363 } 2364 pipe_config->dsc.compressed_bpp = min_t(u16, 2365 dsc_max_output_bpp >> 4, 2366 pipe_config->pipe_bpp); 2367 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2368 } 2369 /* 2370 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2371 * is greater than the maximum Cdclock and if slice count is even 2372 * then we need to use 2 VDSC instances. 2373 */ 2374 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2375 if (pipe_config->dsc.slice_count > 1) { 2376 pipe_config->dsc.dsc_split = true; 2377 } else { 2378 drm_dbg_kms(&dev_priv->drm, 2379 "Cannot split stream to use 2 VDSC instances\n"); 2380 return -EINVAL; 2381 } 2382 } 2383 2384 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2385 if (ret < 0) { 2386 drm_dbg_kms(&dev_priv->drm, 2387 "Cannot compute valid DSC parameters for Input Bpp = %d " 2388 "Compressed BPP = %d\n", 2389 pipe_config->pipe_bpp, 2390 pipe_config->dsc.compressed_bpp); 2391 return ret; 2392 } 2393 2394 pipe_config->dsc.compression_enable = true; 2395 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2396 "Compressed Bpp = %d Slice Count = %d\n", 2397 pipe_config->pipe_bpp, 2398 pipe_config->dsc.compressed_bpp, 2399 pipe_config->dsc.slice_count); 2400 2401 return 0; 2402 } 2403 2404 static int 2405 intel_dp_compute_link_config(struct intel_encoder *encoder, 2406 struct intel_crtc_state *pipe_config, 2407 struct drm_connector_state *conn_state) 2408 { 2409 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2410 const struct drm_display_mode *adjusted_mode = 2411 &pipe_config->hw.adjusted_mode; 2412 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2413 struct link_config_limits limits; 2414 int common_len; 2415 int ret; 2416 2417 common_len = intel_dp_common_len_rate_limit(intel_dp, 2418 intel_dp->max_link_rate); 2419 2420 /* No common link rates between source and sink */ 2421 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2422 2423 limits.min_clock = 0; 2424 limits.max_clock = common_len - 1; 2425 2426 limits.min_lane_count = 1; 2427 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2428 2429 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); 2430 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); 2431 2432 if (intel_dp_is_edp(intel_dp)) { 2433 /* 2434 * Use the maximum clock and number of lanes the eDP panel 2435 advertises being capable of.
The panels are generally 2436 * designed to support only a single clock and lane 2437 * configuration, and typically these values correspond to the 2438 * native resolution of the panel. 2439 */ 2440 limits.min_lane_count = limits.max_lane_count; 2441 limits.min_clock = limits.max_clock; 2442 } 2443 2444 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2445 2446 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 2447 "max rate %d max bpp %d pixel clock %iKHz\n", 2448 limits.max_lane_count, 2449 intel_dp->common_rates[limits.max_clock], 2450 limits.max_bpp, adjusted_mode->crtc_clock); 2451 2452 /* 2453 * Optimize for slow and wide. This is the place to add alternative 2454 * optimization policy. 2455 */ 2456 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2457 2458 /* enable compression if the mode doesn't fit available BW */ 2459 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 2460 if (ret || intel_dp->force_dsc_en) { 2461 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2462 conn_state, &limits); 2463 if (ret < 0) 2464 return ret; 2465 } 2466 2467 if (pipe_config->dsc.compression_enable) { 2468 drm_dbg_kms(&i915->drm, 2469 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2470 pipe_config->lane_count, pipe_config->port_clock, 2471 pipe_config->pipe_bpp, 2472 pipe_config->dsc.compressed_bpp); 2473 2474 drm_dbg_kms(&i915->drm, 2475 "DP link rate required %i available %i\n", 2476 intel_dp_link_required(adjusted_mode->crtc_clock, 2477 pipe_config->dsc.compressed_bpp), 2478 intel_dp_max_data_rate(pipe_config->port_clock, 2479 pipe_config->lane_count)); 2480 } else { 2481 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 2482 pipe_config->lane_count, pipe_config->port_clock, 2483 pipe_config->pipe_bpp); 2484 2485 drm_dbg_kms(&i915->drm, 2486 "DP link rate required %i available %i\n", 2487 intel_dp_link_required(adjusted_mode->crtc_clock, 2488 pipe_config->pipe_bpp), 2489 intel_dp_max_data_rate(pipe_config->port_clock, 2490 pipe_config->lane_count)); 2491 } 2492 return 0; 2493 } 2494 2495 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2496 const struct drm_connector_state *conn_state) 2497 { 2498 const struct intel_digital_connector_state *intel_conn_state = 2499 to_intel_digital_connector_state(conn_state); 2500 const struct drm_display_mode *adjusted_mode = 2501 &crtc_state->hw.adjusted_mode; 2502 2503 /* 2504 * Our YCbCr output is always limited range. 2505 * crtc_state->limited_color_range only applies to RGB, 2506 * and it must never be set for YCbCr or we risk setting 2507 * some conflicting bits in PIPECONF which will mess up 2508 * the colors on the monitor. 
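 *
 * As an example of the AUTO rule below (mode chosen for illustration):
 * a CEA mode such as 1920x1080@60 defaults to limited range per
 * CEA-861, so RGB output is sent as limited range, except when the pipe
 * runs at 18 bpp (6 bpc), where limited range would presumably waste too
 * many of the few quantization levels available.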
2509 */ 2510 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2511 return false; 2512 2513 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2514 /* 2515 * See: 2516 * CEA-861-E - 5.1 Default Encoding Parameters 2517 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2518 */ 2519 return crtc_state->pipe_bpp != 18 && 2520 drm_default_rgb_quant_range(adjusted_mode) == 2521 HDMI_QUANTIZATION_RANGE_LIMITED; 2522 } else { 2523 return intel_conn_state->broadcast_rgb == 2524 INTEL_BROADCAST_RGB_LIMITED; 2525 } 2526 } 2527 2528 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2529 enum port port) 2530 { 2531 if (IS_G4X(dev_priv)) 2532 return false; 2533 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2534 return false; 2535 2536 return true; 2537 } 2538 2539 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2540 const struct drm_connector_state *conn_state, 2541 struct drm_dp_vsc_sdp *vsc) 2542 { 2543 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2544 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2545 2546 /* 2547 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2548 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2549 * Colorimetry Format indication. 2550 */ 2551 vsc->revision = 0x5; 2552 vsc->length = 0x13; 2553 2554 /* DP 1.4a spec, Table 2-120 */ 2555 switch (crtc_state->output_format) { 2556 case INTEL_OUTPUT_FORMAT_YCBCR444: 2557 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2558 break; 2559 case INTEL_OUTPUT_FORMAT_YCBCR420: 2560 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2561 break; 2562 case INTEL_OUTPUT_FORMAT_RGB: 2563 default: 2564 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2565 } 2566 2567 switch (conn_state->colorspace) { 2568 case DRM_MODE_COLORIMETRY_BT709_YCC: 2569 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2570 break; 2571 case DRM_MODE_COLORIMETRY_XVYCC_601: 2572 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2573 break; 2574 case DRM_MODE_COLORIMETRY_XVYCC_709: 2575 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2576 break; 2577 case DRM_MODE_COLORIMETRY_SYCC_601: 2578 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2579 break; 2580 case DRM_MODE_COLORIMETRY_OPYCC_601: 2581 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2582 break; 2583 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2584 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2585 break; 2586 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2587 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2588 break; 2589 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2590 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2591 break; 2592 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2593 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2594 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2595 break; 2596 default: 2597 /* 2598 * RGB->YCBCR color conversion uses the BT.709 2599 * color space. 
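 *
 * In practice this means that, with no explicit Colorspace property set,
 * a YCbCr420 output reports DP_COLORIMETRY_BT709_YCC in the VSC SDP
 * while an RGB output reports DP_COLORIMETRY_DEFAULT.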
2600 */ 2601 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2602 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2603 else 2604 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2605 break; 2606 } 2607 2608 vsc->bpc = crtc_state->pipe_bpp / 3; 2609 2610 /* only RGB pixelformat supports 6 bpc */ 2611 drm_WARN_ON(&dev_priv->drm, 2612 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2613 2614 /* all YCbCr are always limited range */ 2615 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2616 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2617 } 2618 2619 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2620 struct intel_crtc_state *crtc_state, 2621 const struct drm_connector_state *conn_state) 2622 { 2623 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 2624 2625 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 2626 if (crtc_state->has_psr) 2627 return; 2628 2629 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 2630 return; 2631 2632 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2633 vsc->sdp_type = DP_SDP_VSC; 2634 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2635 &crtc_state->infoframes.vsc); 2636 } 2637 2638 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 2639 const struct intel_crtc_state *crtc_state, 2640 const struct drm_connector_state *conn_state, 2641 struct drm_dp_vsc_sdp *vsc) 2642 { 2643 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2644 2645 vsc->sdp_type = DP_SDP_VSC; 2646 2647 if (dev_priv->psr.psr2_enabled) { 2648 if (dev_priv->psr.colorimetry_support && 2649 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2650 /* [PSR2, +Colorimetry] */ 2651 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2652 vsc); 2653 } else { 2654 /* 2655 * [PSR2, -Colorimetry] 2656 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2657 * 3D stereo + PSR/PSR2 + Y-coordinate. 2658 */ 2659 vsc->revision = 0x4; 2660 vsc->length = 0xe; 2661 } 2662 } else { 2663 /* 2664 * [PSR1] 2665 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2666 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2667 * higher). 
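 *
 * For reference, the VSC header variants used in this function are:
 *   PSR2 + colorimetry:   revision 0x5, length 0x13 (via
 *                         intel_dp_compute_vsc_colorimetry())
 *   PSR2, no colorimetry: revision 0x4, length 0xe
 *   PSR1:                 revision 0x2, length 0x8 (set just below)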
2668 */ 2669 vsc->revision = 0x2; 2670 vsc->length = 0x8; 2671 } 2672 } 2673 2674 static void 2675 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2676 struct intel_crtc_state *crtc_state, 2677 const struct drm_connector_state *conn_state) 2678 { 2679 int ret; 2680 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2681 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2682 2683 if (!conn_state->hdr_output_metadata) 2684 return; 2685 2686 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2687 2688 if (ret) { 2689 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2690 return; 2691 } 2692 2693 crtc_state->infoframes.enable |= 2694 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2695 } 2696 2697 static void 2698 intel_dp_drrs_compute_config(struct intel_dp *intel_dp, 2699 struct intel_crtc_state *pipe_config, 2700 int output_bpp, bool constant_n) 2701 { 2702 struct intel_connector *intel_connector = intel_dp->attached_connector; 2703 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2704 2705 /* 2706 * DRRS and PSR can't be enabled together, so we give preference to PSR, 2707 * as it allows more power savings by completely shutting down the display. 2708 * To guarantee this, intel_dp_drrs_compute_config() must be called 2709 * after intel_psr_compute_config(). 2710 */ 2711 if (pipe_config->has_psr) 2712 return; 2713 2714 if (!intel_connector->panel.downclock_mode || 2715 dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) 2716 return; 2717 2718 pipe_config->has_drrs = true; 2719 intel_link_compute_m_n(output_bpp, pipe_config->lane_count, 2720 intel_connector->panel.downclock_mode->clock, 2721 pipe_config->port_clock, &pipe_config->dp_m2_n2, 2722 constant_n, pipe_config->fec_enable); 2723 } 2724 2725 int 2726 intel_dp_compute_config(struct intel_encoder *encoder, 2727 struct intel_crtc_state *pipe_config, 2728 struct drm_connector_state *conn_state) 2729 { 2730 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2731 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2732 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2733 enum port port = encoder->port; 2734 struct intel_connector *intel_connector = intel_dp->attached_connector; 2735 struct intel_digital_connector_state *intel_conn_state = 2736 to_intel_digital_connector_state(conn_state); 2737 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2738 DP_DPCD_QUIRK_CONSTANT_N); 2739 int ret = 0, output_bpp; 2740 2741 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2742 pipe_config->has_pch_encoder = true; 2743 2744 pipe_config->output_format = intel_dp_output_format(&intel_connector->base, 2745 adjusted_mode); 2746 2747 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 2748 ret = intel_pch_panel_fitting(pipe_config, conn_state); 2749 if (ret) 2750 return ret; 2751 } 2752 2753 if (!intel_dp_port_has_audio(dev_priv, port)) 2754 pipe_config->has_audio = false; 2755 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2756 pipe_config->has_audio = intel_dp->has_audio; 2757 else 2758 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2759 2760 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2761 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2762 adjusted_mode); 2763 2764 if (HAS_GMCH(dev_priv)) 2765 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 2766 else 2767 ret =
intel_pch_panel_fitting(pipe_config, conn_state); 2768 if (ret) 2769 return ret; 2770 } 2771 2772 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2773 return -EINVAL; 2774 2775 if (HAS_GMCH(dev_priv) && 2776 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2777 return -EINVAL; 2778 2779 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2780 return -EINVAL; 2781 2782 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2783 return -EINVAL; 2784 2785 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2786 if (ret < 0) 2787 return ret; 2788 2789 pipe_config->limited_color_range = 2790 intel_dp_limited_color_range(pipe_config, conn_state); 2791 2792 if (pipe_config->dsc.compression_enable) 2793 output_bpp = pipe_config->dsc.compressed_bpp; 2794 else 2795 output_bpp = intel_dp_output_bpp(pipe_config->output_format, 2796 pipe_config->pipe_bpp); 2797 2798 intel_link_compute_m_n(output_bpp, 2799 pipe_config->lane_count, 2800 adjusted_mode->crtc_clock, 2801 pipe_config->port_clock, 2802 &pipe_config->dp_m_n, 2803 constant_n, pipe_config->fec_enable); 2804 2805 if (!HAS_DDI(dev_priv)) 2806 intel_dp_set_clock(encoder, pipe_config); 2807 2808 intel_psr_compute_config(intel_dp, pipe_config); 2809 intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp, 2810 constant_n); 2811 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2812 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2813 2814 return 0; 2815 } 2816 2817 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2818 int link_rate, int lane_count) 2819 { 2820 intel_dp->link_trained = false; 2821 intel_dp->link_rate = link_rate; 2822 intel_dp->lane_count = lane_count; 2823 } 2824 2825 static void intel_dp_prepare(struct intel_encoder *encoder, 2826 const struct intel_crtc_state *pipe_config) 2827 { 2828 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2829 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2830 enum port port = encoder->port; 2831 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2832 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2833 2834 intel_dp_set_link_params(intel_dp, 2835 pipe_config->port_clock, 2836 pipe_config->lane_count); 2837 2838 /* 2839 * There are four kinds of DP registers: 2840 * 2841 * IBX PCH 2842 * SNB CPU 2843 * IVB CPU 2844 * CPT PCH 2845 * 2846 * IBX PCH and CPU are the same for almost everything, 2847 * except that the CPU DP PLL is configured in this 2848 * register 2849 * 2850 * CPT PCH is quite different, having many bits moved 2851 * to the TRANS_DP_CTL register instead. That 2852 * configuration happens (oddly) in ilk_pch_enable 2853 */ 2854 2855 /* Preserve the BIOS-computed detected bit. This is 2856 * supposed to be read-only. 
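 *
 * In other words, everything below rebuilds intel_dp->DP from scratch;
 * only DP_DETECTED survives from the BIOS-programmed value, while voltage
 * swing, pre-emphasis, port width, sync polarity and pipe select are all
 * recomputed.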
2857 */ 2858 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2859 2860 /* Handle DP bits in common between all three register formats */ 2861 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2862 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2863 2864 /* Split out the IBX/CPU vs CPT settings */ 2865 2866 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2867 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2868 intel_dp->DP |= DP_SYNC_HS_HIGH; 2869 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2870 intel_dp->DP |= DP_SYNC_VS_HIGH; 2871 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2872 2873 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2874 intel_dp->DP |= DP_ENHANCED_FRAMING; 2875 2876 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2877 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2878 u32 trans_dp; 2879 2880 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2881 2882 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2883 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2884 trans_dp |= TRANS_DP_ENH_FRAMING; 2885 else 2886 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2887 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2888 } else { 2889 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2890 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2891 2892 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2893 intel_dp->DP |= DP_SYNC_HS_HIGH; 2894 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2895 intel_dp->DP |= DP_SYNC_VS_HIGH; 2896 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2897 2898 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2899 intel_dp->DP |= DP_ENHANCED_FRAMING; 2900 2901 if (IS_CHERRYVIEW(dev_priv)) 2902 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2903 else 2904 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2905 } 2906 } 2907 2908 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2909 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2910 2911 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2912 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2913 2914 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2915 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2916 2917 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2918 2919 static void wait_panel_status(struct intel_dp *intel_dp, 2920 u32 mask, 2921 u32 value) 2922 { 2923 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2924 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2925 2926 lockdep_assert_held(&dev_priv->pps_mutex); 2927 2928 intel_pps_verify_state(intel_dp); 2929 2930 pp_stat_reg = _pp_stat_reg(intel_dp); 2931 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2932 2933 drm_dbg_kms(&dev_priv->drm, 2934 "mask %08x value %08x status %08x control %08x\n", 2935 mask, value, 2936 intel_de_read(dev_priv, pp_stat_reg), 2937 intel_de_read(dev_priv, pp_ctrl_reg)); 2938 2939 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2940 mask, value, 5000)) 2941 drm_err(&dev_priv->drm, 2942 "Panel status timeout: status %08x control %08x\n", 2943 intel_de_read(dev_priv, pp_stat_reg), 2944 intel_de_read(dev_priv, pp_ctrl_reg)); 2945 2946 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2947 } 2948 2949 static void wait_panel_on(struct intel_dp *intel_dp) 2950 { 2951 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2952 2953 drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); 2954 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2955 } 2956 
2957 static void wait_panel_off(struct intel_dp *intel_dp) 2958 { 2959 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2960 2961 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n"); 2962 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2963 } 2964 2965 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2966 { 2967 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2968 ktime_t panel_power_on_time; 2969 s64 panel_power_off_duration; 2970 2971 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n"); 2972 2973 /* take the difference of the current time and the panel power off time 2974 * and then make the panel wait for t11_t12 if needed. */ 2975 panel_power_on_time = ktime_get_boottime(); 2976 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2977 2978 /* When we disable the VDD override bit last we have to do the manual 2979 * wait. */ 2980 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2981 wait_remaining_ms_from_jiffies(jiffies, 2982 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2983 2984 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2985 } 2986 2987 static void wait_backlight_on(struct intel_dp *intel_dp) 2988 { 2989 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2990 intel_dp->backlight_on_delay); 2991 } 2992 2993 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2994 { 2995 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2996 intel_dp->backlight_off_delay); 2997 } 2998 2999 /* Read the current pp_control value, unlocking the register if it 3000 * is locked 3001 */ 3002 3003 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 3004 { 3005 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3006 u32 control; 3007 3008 lockdep_assert_held(&dev_priv->pps_mutex); 3009 3010 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 3011 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 3012 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 3013 control &= ~PANEL_UNLOCK_MASK; 3014 control |= PANEL_UNLOCK_REGS; 3015 } 3016 return control; 3017 } 3018 3019 /* 3020 * Must be paired with edp_panel_vdd_off(). 3021 * Must hold pps_mutex around the whole on/off sequence. 3022 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
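 *
 * Typical usage under pps_mutex is (a sketch, assuming an intel_dp *dp
 * already in scope; this mirrors what the AUX transfer path does):
 *
 *	bool vdd = edp_panel_vdd_on(dp);
 *	... AUX transfers / PPS programming ...
 *	if (vdd)
 *		edp_panel_vdd_off(dp, false);
 *
 * The return value says whether this call actually turned VDD on, and
 * thus whether the caller owes a matching off.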
3023 */ 3024 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 3025 { 3026 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3027 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3028 u32 pp; 3029 i915_reg_t pp_stat_reg, pp_ctrl_reg; 3030 bool need_to_disable = !intel_dp->want_panel_vdd; 3031 3032 lockdep_assert_held(&dev_priv->pps_mutex); 3033 3034 if (!intel_dp_is_edp(intel_dp)) 3035 return false; 3036 3037 cancel_delayed_work(&intel_dp->panel_vdd_work); 3038 intel_dp->want_panel_vdd = true; 3039 3040 if (edp_have_panel_vdd(intel_dp)) 3041 return need_to_disable; 3042 3043 intel_display_power_get(dev_priv, 3044 intel_aux_power_domain(dig_port)); 3045 3046 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 3047 dig_port->base.base.base.id, 3048 dig_port->base.base.name); 3049 3050 if (!edp_have_panel_power(intel_dp)) 3051 wait_panel_power_cycle(intel_dp); 3052 3053 pp = ilk_get_pp_control(intel_dp); 3054 pp |= EDP_FORCE_VDD; 3055 3056 pp_stat_reg = _pp_stat_reg(intel_dp); 3057 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3058 3059 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3060 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3061 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 3062 intel_de_read(dev_priv, pp_stat_reg), 3063 intel_de_read(dev_priv, pp_ctrl_reg)); 3064 /* 3065 * If the panel wasn't on, delay before accessing aux channel 3066 */ 3067 if (!edp_have_panel_power(intel_dp)) { 3068 drm_dbg_kms(&dev_priv->drm, 3069 "[ENCODER:%d:%s] panel power wasn't enabled\n", 3070 dig_port->base.base.base.id, 3071 dig_port->base.base.name); 3072 msleep(intel_dp->panel_power_up_delay); 3073 } 3074 3075 return need_to_disable; 3076 } 3077 3078 /* 3079 * Must be paired with intel_edp_panel_vdd_off() or 3080 * intel_edp_panel_off(). 3081 * Nested calls to these functions are not allowed since 3082 * we drop the lock. Caller must use some higher level 3083 * locking to prevent nested calls from other threads. 
3084 */ 3085 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 3086 { 3087 intel_wakeref_t wakeref; 3088 bool vdd; 3089 3090 if (!intel_dp_is_edp(intel_dp)) 3091 return; 3092 3093 vdd = false; 3094 with_pps_lock(intel_dp, wakeref) 3095 vdd = edp_panel_vdd_on(intel_dp); 3096 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 3097 dp_to_dig_port(intel_dp)->base.base.base.id, 3098 dp_to_dig_port(intel_dp)->base.base.name); 3099 } 3100 3101 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 3102 { 3103 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3104 struct intel_digital_port *dig_port = 3105 dp_to_dig_port(intel_dp); 3106 u32 pp; 3107 i915_reg_t pp_stat_reg, pp_ctrl_reg; 3108 3109 lockdep_assert_held(&dev_priv->pps_mutex); 3110 3111 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 3112 3113 if (!edp_have_panel_vdd(intel_dp)) 3114 return; 3115 3116 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 3117 dig_port->base.base.base.id, 3118 dig_port->base.base.name); 3119 3120 pp = ilk_get_pp_control(intel_dp); 3121 pp &= ~EDP_FORCE_VDD; 3122 3123 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3124 pp_stat_reg = _pp_stat_reg(intel_dp); 3125 3126 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3127 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3128 3129 /* Make sure sequencer is idle before allowing subsequent activity */ 3130 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 3131 intel_de_read(dev_priv, pp_stat_reg), 3132 intel_de_read(dev_priv, pp_ctrl_reg)); 3133 3134 if ((pp & PANEL_POWER_ON) == 0) 3135 intel_dp->panel_power_off_time = ktime_get_boottime(); 3136 3137 intel_display_power_put_unchecked(dev_priv, 3138 intel_aux_power_domain(dig_port)); 3139 } 3140 3141 static void edp_panel_vdd_work(struct work_struct *__work) 3142 { 3143 struct intel_dp *intel_dp = 3144 container_of(to_delayed_work(__work), 3145 struct intel_dp, panel_vdd_work); 3146 intel_wakeref_t wakeref; 3147 3148 with_pps_lock(intel_dp, wakeref) { 3149 if (!intel_dp->want_panel_vdd) 3150 edp_panel_vdd_off_sync(intel_dp); 3151 } 3152 } 3153 3154 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 3155 { 3156 unsigned long delay; 3157 3158 /* 3159 * Queue the timer to fire a long time from now (relative to the power 3160 * down delay) to keep the panel power up across a sequence of 3161 * operations. 3162 */ 3163 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 3164 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 3165 } 3166 3167 /* 3168 * Must be paired with edp_panel_vdd_on(). 3169 * Must hold pps_mutex around the whole on/off sequence. 3170 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
3171 */ 3172 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 3173 { 3174 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3175 3176 lockdep_assert_held(&dev_priv->pps_mutex); 3177 3178 if (!intel_dp_is_edp(intel_dp)) 3179 return; 3180 3181 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 3182 dp_to_dig_port(intel_dp)->base.base.base.id, 3183 dp_to_dig_port(intel_dp)->base.base.name); 3184 3185 intel_dp->want_panel_vdd = false; 3186 3187 if (sync) 3188 edp_panel_vdd_off_sync(intel_dp); 3189 else 3190 edp_panel_vdd_schedule_off(intel_dp); 3191 } 3192 3193 static void edp_panel_on(struct intel_dp *intel_dp) 3194 { 3195 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3196 u32 pp; 3197 i915_reg_t pp_ctrl_reg; 3198 3199 lockdep_assert_held(&dev_priv->pps_mutex); 3200 3201 if (!intel_dp_is_edp(intel_dp)) 3202 return; 3203 3204 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 3205 dp_to_dig_port(intel_dp)->base.base.base.id, 3206 dp_to_dig_port(intel_dp)->base.base.name); 3207 3208 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 3209 "[ENCODER:%d:%s] panel power already on\n", 3210 dp_to_dig_port(intel_dp)->base.base.base.id, 3211 dp_to_dig_port(intel_dp)->base.base.name)) 3212 return; 3213 3214 wait_panel_power_cycle(intel_dp); 3215 3216 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3217 pp = ilk_get_pp_control(intel_dp); 3218 if (IS_GEN(dev_priv, 5)) { 3219 /* ILK workaround: disable reset around power sequence */ 3220 pp &= ~PANEL_POWER_RESET; 3221 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3222 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3223 } 3224 3225 pp |= PANEL_POWER_ON; 3226 if (!IS_GEN(dev_priv, 5)) 3227 pp |= PANEL_POWER_RESET; 3228 3229 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3230 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3231 3232 wait_panel_on(intel_dp); 3233 intel_dp->last_power_on = jiffies; 3234 3235 if (IS_GEN(dev_priv, 5)) { 3236 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 3237 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3238 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3239 } 3240 } 3241 3242 void intel_edp_panel_on(struct intel_dp *intel_dp) 3243 { 3244 intel_wakeref_t wakeref; 3245 3246 if (!intel_dp_is_edp(intel_dp)) 3247 return; 3248 3249 with_pps_lock(intel_dp, wakeref) 3250 edp_panel_on(intel_dp); 3251 } 3252 3253 3254 static void edp_panel_off(struct intel_dp *intel_dp) 3255 { 3256 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3257 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3258 u32 pp; 3259 i915_reg_t pp_ctrl_reg; 3260 3261 lockdep_assert_held(&dev_priv->pps_mutex); 3262 3263 if (!intel_dp_is_edp(intel_dp)) 3264 return; 3265 3266 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 3267 dig_port->base.base.base.id, dig_port->base.base.name); 3268 3269 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 3270 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 3271 dig_port->base.base.base.id, dig_port->base.base.name); 3272 3273 pp = ilk_get_pp_control(intel_dp); 3274 /* We need to switch off panel power _and_ force vdd, for otherwise some 3275 * panels get very unhappy and cease to work. 
*/ 3276 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 3277 EDP_BLC_ENABLE); 3278 3279 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3280 3281 intel_dp->want_panel_vdd = false; 3282 3283 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3284 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3285 3286 wait_panel_off(intel_dp); 3287 intel_dp->panel_power_off_time = ktime_get_boottime(); 3288 3289 /* We got a reference when we enabled the VDD. */ 3290 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3291 } 3292 3293 void intel_edp_panel_off(struct intel_dp *intel_dp) 3294 { 3295 intel_wakeref_t wakeref; 3296 3297 if (!intel_dp_is_edp(intel_dp)) 3298 return; 3299 3300 with_pps_lock(intel_dp, wakeref) 3301 edp_panel_off(intel_dp); 3302 } 3303 3304 /* Enable backlight in the panel power control. */ 3305 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 3306 { 3307 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3308 intel_wakeref_t wakeref; 3309 3310 /* 3311 * If we enable the backlight right away following a panel power 3312 * on, we may see slight flicker as the panel syncs with the eDP 3313 * link. So delay a bit to make sure the image is solid before 3314 * allowing it to appear. 3315 */ 3316 wait_backlight_on(intel_dp); 3317 3318 with_pps_lock(intel_dp, wakeref) { 3319 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3320 u32 pp; 3321 3322 pp = ilk_get_pp_control(intel_dp); 3323 pp |= EDP_BLC_ENABLE; 3324 3325 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3326 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3327 } 3328 } 3329 3330 /* Enable backlight PWM and backlight PP control. */ 3331 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3332 const struct drm_connector_state *conn_state) 3333 { 3334 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3335 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3336 3337 if (!intel_dp_is_edp(intel_dp)) 3338 return; 3339 3340 drm_dbg_kms(&i915->drm, "\n"); 3341 3342 intel_panel_enable_backlight(crtc_state, conn_state); 3343 _intel_edp_backlight_on(intel_dp); 3344 } 3345 3346 /* Disable backlight in the panel power control. */ 3347 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3348 { 3349 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3350 intel_wakeref_t wakeref; 3351 3352 if (!intel_dp_is_edp(intel_dp)) 3353 return; 3354 3355 with_pps_lock(intel_dp, wakeref) { 3356 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3357 u32 pp; 3358 3359 pp = ilk_get_pp_control(intel_dp); 3360 pp &= ~EDP_BLC_ENABLE; 3361 3362 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3363 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3364 } 3365 3366 intel_dp->last_backlight_off = jiffies; 3367 edp_wait_backlight_off(intel_dp); 3368 } 3369 3370 /* Disable backlight PP control and backlight PWM. */ 3371 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3372 { 3373 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3374 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3375 3376 if (!intel_dp_is_edp(intel_dp)) 3377 return; 3378 3379 drm_dbg_kms(&i915->drm, "\n"); 3380 3381 _intel_edp_backlight_off(intel_dp); 3382 intel_panel_disable_backlight(old_conn_state); 3383 } 3384 3385 /* 3386 * Hook for controlling the panel power control backlight through the bl_power 3387 * sysfs attribute. Take care to handle multiple calls. 
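 *
 * Repeated writes of the same value are handled below by first reading
 * EDP_BLC_ENABLE back from the power sequencer and returning early when
 * the requested state already matches.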
3388 */ 3389 static void intel_edp_backlight_power(struct intel_connector *connector, 3390 bool enable) 3391 { 3392 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3393 struct intel_dp *intel_dp = intel_attached_dp(connector); 3394 intel_wakeref_t wakeref; 3395 bool is_enabled; 3396 3397 is_enabled = false; 3398 with_pps_lock(intel_dp, wakeref) 3399 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3400 if (is_enabled == enable) 3401 return; 3402 3403 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", 3404 enable ? "enable" : "disable"); 3405 3406 if (enable) 3407 _intel_edp_backlight_on(intel_dp); 3408 else 3409 _intel_edp_backlight_off(intel_dp); 3410 } 3411 3412 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3413 { 3414 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3415 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3416 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3417 3418 I915_STATE_WARN(cur_state != state, 3419 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3420 dig_port->base.base.base.id, dig_port->base.base.name, 3421 onoff(state), onoff(cur_state)); 3422 } 3423 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3424 3425 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3426 { 3427 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3428 3429 I915_STATE_WARN(cur_state != state, 3430 "eDP PLL state assertion failure (expected %s, current %s)\n", 3431 onoff(state), onoff(cur_state)); 3432 } 3433 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3434 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3435 3436 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3437 const struct intel_crtc_state *pipe_config) 3438 { 3439 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3440 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3441 3442 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3443 assert_dp_port_disabled(intel_dp); 3444 assert_edp_pll_disabled(dev_priv); 3445 3446 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3447 pipe_config->port_clock); 3448 3449 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3450 3451 if (pipe_config->port_clock == 162000) 3452 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3453 else 3454 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3455 3456 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3457 intel_de_posting_read(dev_priv, DP_A); 3458 udelay(500); 3459 3460 /* 3461 * [DevILK] Work around required when enabling DP PLL 3462 * while a pipe is enabled going to FDI: 3463 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3464 * 2. 
Program DP PLL enable 3465 */ 3466 if (IS_GEN(dev_priv, 5)) 3467 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3468 3469 intel_dp->DP |= DP_PLL_ENABLE; 3470 3471 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3472 intel_de_posting_read(dev_priv, DP_A); 3473 udelay(200); 3474 } 3475 3476 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3477 const struct intel_crtc_state *old_crtc_state) 3478 { 3479 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3480 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3481 3482 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3483 assert_dp_port_disabled(intel_dp); 3484 assert_edp_pll_enabled(dev_priv); 3485 3486 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3487 3488 intel_dp->DP &= ~DP_PLL_ENABLE; 3489 3490 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3491 intel_de_posting_read(dev_priv, DP_A); 3492 udelay(200); 3493 } 3494 3495 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3496 { 3497 /* 3498 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3499 * be capable of signalling downstream hpd with a long pulse. 3500 * Whether or not that means D3 is safe to use is not clear, 3501 * but let's assume so until proven otherwise. 3502 * 3503 * FIXME should really check all downstream ports... 3504 */ 3505 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3506 drm_dp_is_branch(intel_dp->dpcd) && 3507 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3508 } 3509 3510 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3511 const struct intel_crtc_state *crtc_state, 3512 bool enable) 3513 { 3514 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3515 int ret; 3516 3517 if (!crtc_state->dsc.compression_enable) 3518 return; 3519 3520 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3521 enable ? DP_DECOMPRESSION_EN : 0); 3522 if (ret < 0) 3523 drm_dbg_kms(&i915->drm, 3524 "Failed to %s sink decompression state\n", 3525 enable ? "enable" : "disable"); 3526 } 3527 3528 /* If the device supports it, try to set the power state appropriately */ 3529 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 3530 { 3531 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3532 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3533 int ret, i; 3534 3535 /* Should have a valid DPCD by this point */ 3536 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3537 return; 3538 3539 if (mode != DP_SET_POWER_D0) { 3540 if (downstream_hpd_needs_d0(intel_dp)) 3541 return; 3542 3543 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3544 } else { 3545 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3546 3547 lspcon_resume(dp_to_dig_port(intel_dp)); 3548 3549 /* 3550 * When turning on, we need to retry for 1ms to give the sink 3551 * time to wake up. 3552 */ 3553 for (i = 0; i < 3; i++) { 3554 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 3555 if (ret == 1) 3556 break; 3557 msleep(1); 3558 } 3559 3560 if (ret == 1 && lspcon->active) 3561 lspcon_wait_pcon_mode(lspcon); 3562 } 3563 3564 if (ret != 1) 3565 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", 3566 encoder->base.base.id, encoder->base.name, 3567 mode == DP_SET_POWER_D0 ? 
"D0" : "D3"); 3568 } 3569 3570 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3571 enum port port, enum pipe *pipe) 3572 { 3573 enum pipe p; 3574 3575 for_each_pipe(dev_priv, p) { 3576 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3577 3578 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3579 *pipe = p; 3580 return true; 3581 } 3582 } 3583 3584 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3585 port_name(port)); 3586 3587 /* must initialize pipe to something for the asserts */ 3588 *pipe = PIPE_A; 3589 3590 return false; 3591 } 3592 3593 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3594 i915_reg_t dp_reg, enum port port, 3595 enum pipe *pipe) 3596 { 3597 bool ret; 3598 u32 val; 3599 3600 val = intel_de_read(dev_priv, dp_reg); 3601 3602 ret = val & DP_PORT_EN; 3603 3604 /* asserts want to know the pipe even if the port is disabled */ 3605 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3606 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3607 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3608 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3609 else if (IS_CHERRYVIEW(dev_priv)) 3610 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3611 else 3612 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3613 3614 return ret; 3615 } 3616 3617 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3618 enum pipe *pipe) 3619 { 3620 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3621 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3622 intel_wakeref_t wakeref; 3623 bool ret; 3624 3625 wakeref = intel_display_power_get_if_enabled(dev_priv, 3626 encoder->power_domain); 3627 if (!wakeref) 3628 return false; 3629 3630 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3631 encoder->port, pipe); 3632 3633 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3634 3635 return ret; 3636 } 3637 3638 static void intel_dp_get_config(struct intel_encoder *encoder, 3639 struct intel_crtc_state *pipe_config) 3640 { 3641 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3642 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3643 u32 tmp, flags = 0; 3644 enum port port = encoder->port; 3645 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3646 3647 if (encoder->type == INTEL_OUTPUT_EDP) 3648 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3649 else 3650 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3651 3652 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3653 3654 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3655 3656 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3657 u32 trans_dp = intel_de_read(dev_priv, 3658 TRANS_DP_CTL(crtc->pipe)); 3659 3660 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3661 flags |= DRM_MODE_FLAG_PHSYNC; 3662 else 3663 flags |= DRM_MODE_FLAG_NHSYNC; 3664 3665 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3666 flags |= DRM_MODE_FLAG_PVSYNC; 3667 else 3668 flags |= DRM_MODE_FLAG_NVSYNC; 3669 } else { 3670 if (tmp & DP_SYNC_HS_HIGH) 3671 flags |= DRM_MODE_FLAG_PHSYNC; 3672 else 3673 flags |= DRM_MODE_FLAG_NHSYNC; 3674 3675 if (tmp & DP_SYNC_VS_HIGH) 3676 flags |= DRM_MODE_FLAG_PVSYNC; 3677 else 3678 flags |= DRM_MODE_FLAG_NVSYNC; 3679 } 3680 3681 pipe_config->hw.adjusted_mode.flags |= flags; 3682 3683 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3684 pipe_config->limited_color_range = true; 3685 3686 pipe_config->lane_count = 3687 ((tmp & 
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
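/*
 * A worked example of the readout math above (illustrative only): with the
 * port running at 270000 kHz and BIOS-programmed M/N values giving a
 * link_m/link_n ratio of 1/2, intel_dotclock_calculate() recovers roughly
 * 270000 * 1 / 2 = 135000 kHz for adjusted_mode.crtc_clock. The point is
 * that the dotclock is derived from the link clock and the M/N dividers
 * rather than read back from a pixel clock register.
 */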
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp);

/**
 * intel_dp_sync_state - sync the encoder state during init/resume
 * @encoder: intel encoder to sync
 * @crtc_state: state for the CRTC connected to the encoder
 *
 * Sync any state stored in the encoder wrt. HW state during driver init
 * and system resume.
 */
void intel_dp_sync_state(struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * Don't clobber DPCD if it's been already read out during output
	 * setup (eDP) or detect.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		intel_dp_get_dpcd(intel_dp);

	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
}

bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If the BIOS has set an unsupported or non-standard link rate for
	 * some reason, force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}

static void intel_disable_dp(struct intel_atomic_state *state,
			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/*
	 * Make sure the panel is off before trying to change the mode.
	 * But also ensure that we have vdd while we switch off the panel.
	 */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
	intel_edp_panel_off(intel_dp);
}

static void g4x_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

static void vlv_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
static void g4x_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}

static void vlv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}

static void chv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}

static void
cpt_set_link_train(struct intel_dp *intel_dp,
		   const struct intel_crtc_state *crtc_state,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK_CPT;

	switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF_CPT;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1_CPT;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

static void
g4x_set_link_train(struct intel_dp *intel_dp,
		   const struct intel_crtc_state *crtc_state,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK;

	switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
			    enableddisabled(intel_dp->has_hdmi_sink));

	tmp = intel_dp->dfp.ycbcr_444_to_420 ?
		DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
			    enableddisabled(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
			    enableddisabled(false));
}

static void intel_enable_dp(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
	intel_dp_configure_protocol_converter(intel_dp);
	intel_dp_start_link_train(intel_dp, pipe_config);
	intel_dp_stop_link_train(intel_dp, pipe_config);

	if (pipe_config->has_audio) {
		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
			pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}

static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
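/*
 * A reader's note on the ordering in intel_enable_dp() above (an
 * observation on this code, not a quote from Bspec): the port and panel
 * are powered up under the PPS lock first, the sink is then moved to D0
 * and its protocol converter configured over AUX, and only after that is
 * link training started. Training any earlier would be futile, since a
 * sink still in D3 may not respond on AUX or lock to the link at all.
 */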
static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}

static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}

static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}

static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}

static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}

static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}

static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}

static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
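/*
 * Quick reference for the train_set decoding in the helpers below (an
 * illustrative aside; see the DP spec's TRAINING_LANEx_SET definition for
 * the authoritative layout): bits 1:0 carry the voltage swing level and
 * bits 4:3 the pre-emphasis level. For example train_set = 0x0a decodes
 * as swing level 2 (0x0a & DP_TRAIN_VOLTAGE_SWING_MASK = 2) with
 * pre-emphasis level 1 (DP_TRAIN_PRE_EMPH_LEVEL_1 = 1 << 3 = 0x08).
 */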
static void vlv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, crtc_state,
				 demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}
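/*
 * A note on the VLV/CHV tables here (an observation on this code, hedged
 * against the DP spec): the swing/pre-emphasis combinations handled are
 * exactly those where swing level + pre-emphasis level <= 3. Higher sums
 * appear to exceed what the standard requires a source to generate, which
 * is why the inner switches simply return for unsupported combinations
 * rather than warning.
 */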
static void chv_set_signal_levels(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, crtc_state,
				 deemph_reg_value, margin_reg_value,
				 uniq_trans_scale);
}

static u32 g4x_signal_levels(u8 train_set)
{
	u32 signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

static void
g4x_set_signal_levels(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = g4x_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
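/*
 * Worked example for g4x_signal_levels() (illustrative only): train_set
 * 0x0a, i.e. swing level 2 with pre-emphasis level 1 per the decoding
 * note earlier, maps to DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5, meaning a
 * 0.8 V swing with 3.5 dB of pre-emphasis programmed into the port
 * register.
 */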
/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
			      signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = snb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
			      signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = ivb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
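/*
 * Why the "case SWING_x | PRE_EMPH_y:" trick in the snb/ivb helpers above
 * is safe (a reader's aside): the swing field (bits 1:0) and the
 * pre-emphasis field (bits 4:3) of train_set do not overlap, so ORing one
 * value from each field yields a unique constant, and a single combined
 * switch can match every supported pairing directly.
 */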
4588 " (max)" : ""); 4589 4590 intel_dp->set_signal_levels(intel_dp, crtc_state); 4591 } 4592 4593 void 4594 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4595 const struct intel_crtc_state *crtc_state, 4596 u8 dp_train_pat) 4597 { 4598 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4599 4600 if ((intel_dp_training_pattern_symbol(dp_train_pat)) != 4601 DP_TRAINING_PATTERN_DISABLE) 4602 drm_dbg_kms(&dev_priv->drm, 4603 "Using DP training pattern TPS%d\n", 4604 intel_dp_training_pattern_symbol(dp_train_pat)); 4605 4606 intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); 4607 } 4608 4609 static void 4610 intel_dp_link_down(struct intel_encoder *encoder, 4611 const struct intel_crtc_state *old_crtc_state) 4612 { 4613 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4614 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4615 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4616 enum port port = encoder->port; 4617 u32 DP = intel_dp->DP; 4618 4619 if (drm_WARN_ON(&dev_priv->drm, 4620 (intel_de_read(dev_priv, intel_dp->output_reg) & 4621 DP_PORT_EN) == 0)) 4622 return; 4623 4624 drm_dbg_kms(&dev_priv->drm, "\n"); 4625 4626 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4627 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4628 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4629 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4630 } else { 4631 DP &= ~DP_LINK_TRAIN_MASK; 4632 DP |= DP_LINK_TRAIN_PAT_IDLE; 4633 } 4634 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4635 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4636 4637 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4638 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4639 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4640 4641 /* 4642 * HW workaround for IBX, we need to move the port 4643 * to transcoder A after disabling it to allow the 4644 * matching HDMI port to be enabled on transcoder A. 4645 */ 4646 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4647 /* 4648 * We get CPU/PCH FIFO underruns on the other pipe when 4649 * doing the workaround. Sweep them under the rug. 
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}

bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}

static bool
intel_dp_has_sink_count(struct intel_dp *intel_dp)
{
	if (!intel_dp->attached_connector)
		return false;

	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
					  intel_dp->dpcd,
					  &intel_dp->desc);
}
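/*
 * Worked example for the DP_SUPPORTED_LINK_RATES conversion above
 * (illustrative only): a table entry of 27000 means 27000 * 200 kHz =
 * 5.4 GHz per-lane bit rate, i.e. HBR2. Dividing by 10 (an 8b/10b symbol
 * carries 8 data bits in 10 link bits) stores it as 540000, the LS_Clk
 * value in kHz that the rest of the driver uses for link rates.
 */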
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	intel_dp_lttpr_init(intel_dp);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * The sink count can change between short pulse HPD
		 * interrupts, hence a member variable in intel_dp tracks
		 * any changes between them.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies
		 * that a dongle is present but no display. Unless we need to
		 * know whether a dongle is present, we don't have to update
		 * the downstream port information, so an early return here
		 * saves us from performing other operations that are not
		 * required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}

static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCbCr 4:2:0 or HDR BT.2020 signals we should use a DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}
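/*
 * Note on intel_dp_get_sink_irq_esi() above (a reader's aside; the exact
 * register map is defined by the DP spec): DP_SINK_COUNT_ESI sits at DPCD
 * 0x2002, so a DP_DPRX_ESI_LEN (14) byte read pulls in the event status
 * indicator block through 0x200f, covering the sink count, device service
 * IRQs and per-lane link status in a single AUX transaction.
 */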
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11 the GMP register size is identical to the DP HDR
	 * static metadata infoframe size. GEN11+ has a larger GMP register,
	 * and write_infoframe will pad the rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
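/*
 * Two worked examples for the packing helpers above (illustrative only):
 *
 * - VSC SDP payload: 10 bpc with CTA dynamic range packs DB17 as
 *   0x2 | 0x80 = 0x82; an RGB pixel format (0x0) with BT.2020 RGB
 *   colorimetry (0x6) would pack DB16 as (0x0 << 4) | 0x6 = 0x06.
 *
 * - HDR metadata SDP header: infoframe_size is
 *   HDMI_INFOFRAME_HEADER_SIZE (4) + HDMI_DRM_INFOFRAME_SIZE (26) = 30
 *   bytes, so "Data Byte Count - 1" is 29 = 0x1d, which is exactly the
 *   hardcoded HB2 value.
 */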
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg);

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (intel_psr_enabled(intel_dp))
		val &= ~dip_enable;
	else
		val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);

	if (!enable) {
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		return;
	}

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!intel_psr_enabled(intel_dp))
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	/* only clear the output struct, not the (larger) input buffer size */
	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/
		 *   Colorimetry Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;
	int ret;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count - 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
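/*
 * Worked example for the autotest helper above (illustrative only): a
 * TEST_LINK_RATE readback of 0x14 becomes 0x14 * 27000 = 540000 kHz after
 * drm_dp_bw_code_to_link_rate(), i.e. the HBR2 5.4 Gbps per-lane rate
 * expressed as the 540 MHz link symbol clock the driver works in.
 */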
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/*
		 * Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum of the last block read */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
5479 struct edid *block = intel_connector->detect_edid; 5480 5481 /* We have to write the checksum 5482 * of the last block read 5483 */ 5484 block += intel_connector->detect_edid->extensions; 5485 5486 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 5487 block->checksum) <= 0) 5488 drm_dbg_kms(&i915->drm, 5489 "Failed to write EDID checksum\n"); 5490 5491 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 5492 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 5493 } 5494 5495 /* Set test active flag here so userspace doesn't interrupt things */ 5496 intel_dp->compliance.test_active = true; 5497 5498 return test_result; 5499 } 5500 5501 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 5502 const struct intel_crtc_state *crtc_state) 5503 { 5504 struct drm_i915_private *dev_priv = 5505 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 5506 struct drm_dp_phy_test_params *data = 5507 &intel_dp->compliance.test_data.phytest; 5508 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5509 enum pipe pipe = crtc->pipe; 5510 u32 pattern_val; 5511 5512 switch (data->phy_pattern) { 5513 case DP_PHY_TEST_PATTERN_NONE: 5514 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 5515 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 5516 break; 5517 case DP_PHY_TEST_PATTERN_D10_2: 5518 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 5519 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5520 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 5521 break; 5522 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 5523 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 5524 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5525 DDI_DP_COMP_CTL_ENABLE | 5526 DDI_DP_COMP_CTL_SCRAMBLED_0); 5527 break; 5528 case DP_PHY_TEST_PATTERN_PRBS7: 5529 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 5530 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5531 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 5532 break; 5533 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 5534 /* 5535 * FIXME: Ideally the pattern should come from DPCD 0x250. As the 5536 * current DPR-100 firmware cannot set it, hardcode the pattern 5537 * for now for the compliance test. 5538 */ 5539 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 5540 pattern_val = 0x3e0f83e0; 5541 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 5542 pattern_val = 0x0f83e0f8; 5543 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 5544 pattern_val = 0x0000f83e; 5545 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 5546 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5547 DDI_DP_COMP_CTL_ENABLE | 5548 DDI_DP_COMP_CTL_CUSTOM80); 5549 break; 5550 case DP_PHY_TEST_PATTERN_CP2520: 5551 /* 5552 * FIXME: Ideally the pattern should come from DPCD 0x24A. As the 5553 * current DPR-100 firmware cannot set it, hardcode the pattern 5554 * for now for the compliance test.
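 * (DPCD 0x24A is DP_HBR2_COMPLIANCE_SCRAMBLER_RESET; instead the fixed
 * value 0xFB below is programmed straight into DDI_DP_COMP_CTL.)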
5555 */ 5556 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5557 pattern_val = 0xFB; 5558 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5559 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5560 pattern_val); 5561 break; 5562 default: 5563 WARN(1, "Invalid Phy Test Pattern\n"); 5564 } 5565 } 5566 5567 static void 5568 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, 5569 const struct intel_crtc_state *crtc_state) 5570 { 5571 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5572 struct drm_device *dev = dig_port->base.base.dev; 5573 struct drm_i915_private *dev_priv = to_i915(dev); 5574 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5575 enum pipe pipe = crtc->pipe; 5576 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5577 5578 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5579 TRANS_DDI_FUNC_CTL(pipe)); 5580 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5581 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5582 5583 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5584 TGL_TRANS_DDI_PORT_MASK); 5585 trans_conf_value &= ~PIPECONF_ENABLE; 5586 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5587 5588 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5589 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5590 trans_ddi_func_ctl_value); 5591 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5592 } 5593 5594 static void 5595 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, 5596 const struct intel_crtc_state *crtc_state) 5597 { 5598 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5599 struct drm_device *dev = dig_port->base.base.dev; 5600 struct drm_i915_private *dev_priv = to_i915(dev); 5601 enum port port = dig_port->base.port; 5602 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5603 enum pipe pipe = crtc->pipe; 5604 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5605 5606 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5607 TRANS_DDI_FUNC_CTL(pipe)); 5608 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5609 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5610 5611 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5612 TGL_TRANS_DDI_SELECT_PORT(port); 5613 trans_conf_value |= PIPECONF_ENABLE; 5614 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5615 5616 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5617 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5618 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5619 trans_ddi_func_ctl_value); 5620 } 5621 5622 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 5623 const struct intel_crtc_state *crtc_state) 5624 { 5625 struct drm_dp_phy_test_params *data = 5626 &intel_dp->compliance.test_data.phytest; 5627 u8 link_status[DP_LINK_STATUS_SIZE]; 5628 5629 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 5630 link_status) < 0) { 5631 DRM_DEBUG_KMS("failed to get link status\n"); 5632 return; 5633 } 5634 5635 /* retrieve vswing & pre-emphasis setting */ 5636 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 5637 link_status); 5638 5639 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); 5640 5641 intel_dp_set_signal_levels(intel_dp, crtc_state); 5642 5643 intel_dp_phy_pattern_update(intel_dp, crtc_state); 5644 5645 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); 5646 5647 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5648 link_status[DP_DPCD_REV]); 
5649 } 5650 5651 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5652 { 5653 struct drm_dp_phy_test_params *data = 5654 &intel_dp->compliance.test_data.phytest; 5655 5656 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 5657 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 5658 return DP_TEST_NAK; 5659 } 5660 5661 /* Set test active flag here so userspace doesn't interrupt things */ 5662 intel_dp->compliance.test_active = true; 5663 5664 return DP_TEST_ACK; 5665 } 5666 5667 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5668 { 5669 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5670 u8 response = DP_TEST_NAK; 5671 u8 request = 0; 5672 int status; 5673 5674 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5675 if (status <= 0) { 5676 drm_dbg_kms(&i915->drm, 5677 "Could not read test request from sink\n"); 5678 goto update_status; 5679 } 5680 5681 switch (request) { 5682 case DP_TEST_LINK_TRAINING: 5683 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5684 response = intel_dp_autotest_link_training(intel_dp); 5685 break; 5686 case DP_TEST_LINK_VIDEO_PATTERN: 5687 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5688 response = intel_dp_autotest_video_pattern(intel_dp); 5689 break; 5690 case DP_TEST_LINK_EDID_READ: 5691 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5692 response = intel_dp_autotest_edid(intel_dp); 5693 break; 5694 case DP_TEST_LINK_PHY_TEST_PATTERN: 5695 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5696 response = intel_dp_autotest_phy_pattern(intel_dp); 5697 break; 5698 default: 5699 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5700 request); 5701 break; 5702 } 5703 5704 if (response & DP_TEST_ACK) 5705 intel_dp->compliance.test_type = request; 5706 5707 update_status: 5708 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5709 if (status <= 0) 5710 drm_dbg_kms(&i915->drm, 5711 "Could not write test response to sink\n"); 5712 } 5713 5714 /** 5715 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 5716 * @intel_dp: Intel DP struct 5717 * 5718 * Read any pending MST interrupts, call MST core to handle these and ack the 5719 * interrupts. Check if the main and AUX link state is ok. 5720 * 5721 * Returns: 5722 * - %true if pending interrupts were serviced (or no interrupts were 5723 * pending) w/o detecting an error condition. 5724 * - %false if an error condition - like AUX failure or a loss of link - is 5725 * detected, which needs servicing from the hotplug work. 
5726 */ 5727 static bool 5728 intel_dp_check_mst_status(struct intel_dp *intel_dp) 5729 { 5730 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5731 bool link_ok = true; 5732 5733 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 5734 5735 for (;;) { 5736 u8 esi[DP_DPRX_ESI_LEN] = {}; 5737 bool handled; 5738 int retry; 5739 5740 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 5741 drm_dbg_kms(&i915->drm, 5742 "failed to get ESI - device may have failed\n"); 5743 link_ok = false; 5744 5745 break; 5746 } 5747 5748 /* check link status - esi[10] = 0x200c */ 5749 if (intel_dp->active_mst_links > 0 && link_ok && 5750 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 5751 drm_dbg_kms(&i915->drm, 5752 "channel EQ not ok, retraining\n"); 5753 link_ok = false; 5754 } 5755 5756 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 5757 5758 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 5759 if (!handled) 5760 break; 5761 5762 for (retry = 0; retry < 3; retry++) { 5763 int wret; 5764 5765 wret = drm_dp_dpcd_write(&intel_dp->aux, 5766 DP_SINK_COUNT_ESI+1, 5767 &esi[1], 3); 5768 if (wret == 3) 5769 break; 5770 } 5771 } 5772 5773 return link_ok; 5774 } 5775 5776 static bool 5777 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5778 { 5779 u8 link_status[DP_LINK_STATUS_SIZE]; 5780 5781 if (!intel_dp->link_trained) 5782 return false; 5783 5784 /* 5785 * While PSR source HW is enabled, it will control main-link sending 5786 * frames, enabling and disabling it so trying to do a retrain will fail 5787 * as the link would or not be on or it could mix training patterns 5788 * and frame data at the same time causing retrain to fail. 5789 * Also when exiting PSR, HW will retrain the link anyways fixing 5790 * any link status error. 5791 */ 5792 if (intel_psr_enabled(intel_dp)) 5793 return false; 5794 5795 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 5796 link_status) < 0) 5797 return false; 5798 5799 /* 5800 * Validate the cached values of intel_dp->link_rate and 5801 * intel_dp->lane_count before attempting to retrain. 5802 * 5803 * FIXME would be nice to user the crtc state here, but since 5804 * we need to call this from the short HPD handler that seems 5805 * a bit hard. 
5806 */ 5807 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5808 intel_dp->lane_count)) 5809 return false; 5810 5811 /* Retrain if Channel EQ or CR not ok */ 5812 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5813 } 5814 5815 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5816 const struct drm_connector_state *conn_state) 5817 { 5818 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5819 struct intel_encoder *encoder; 5820 enum pipe pipe; 5821 5822 if (!conn_state->best_encoder) 5823 return false; 5824 5825 /* SST */ 5826 encoder = &dp_to_dig_port(intel_dp)->base; 5827 if (conn_state->best_encoder == &encoder->base) 5828 return true; 5829 5830 /* MST */ 5831 for_each_pipe(i915, pipe) { 5832 encoder = &intel_dp->mst_encoders[pipe]->base; 5833 if (conn_state->best_encoder == &encoder->base) 5834 return true; 5835 } 5836 5837 return false; 5838 } 5839 5840 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5841 struct drm_modeset_acquire_ctx *ctx, 5842 u32 *crtc_mask) 5843 { 5844 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5845 struct drm_connector_list_iter conn_iter; 5846 struct intel_connector *connector; 5847 int ret = 0; 5848 5849 *crtc_mask = 0; 5850 5851 if (!intel_dp_needs_link_retrain(intel_dp)) 5852 return 0; 5853 5854 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5855 for_each_intel_connector_iter(connector, &conn_iter) { 5856 struct drm_connector_state *conn_state = 5857 connector->base.state; 5858 struct intel_crtc_state *crtc_state; 5859 struct intel_crtc *crtc; 5860 5861 if (!intel_dp_has_connector(intel_dp, conn_state)) 5862 continue; 5863 5864 crtc = to_intel_crtc(conn_state->crtc); 5865 if (!crtc) 5866 continue; 5867 5868 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5869 if (ret) 5870 break; 5871 5872 crtc_state = to_intel_crtc_state(crtc->base.state); 5873 5874 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5875 5876 if (!crtc_state->hw.active) 5877 continue; 5878 5879 if (conn_state->commit && 5880 !try_wait_for_completion(&conn_state->commit->hw_done)) 5881 continue; 5882 5883 *crtc_mask |= drm_crtc_mask(&crtc->base); 5884 } 5885 drm_connector_list_iter_end(&conn_iter); 5886 5887 if (!intel_dp_needs_link_retrain(intel_dp)) 5888 *crtc_mask = 0; 5889 5890 return ret; 5891 } 5892 5893 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5894 { 5895 struct intel_connector *connector = intel_dp->attached_connector; 5896 5897 return connector->base.status == connector_status_connected || 5898 intel_dp->is_mst; 5899 } 5900 5901 int intel_dp_retrain_link(struct intel_encoder *encoder, 5902 struct drm_modeset_acquire_ctx *ctx) 5903 { 5904 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5905 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5906 struct intel_crtc *crtc; 5907 u32 crtc_mask; 5908 int ret; 5909 5910 if (!intel_dp_is_connected(intel_dp)) 5911 return 0; 5912 5913 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5914 ctx); 5915 if (ret) 5916 return ret; 5917 5918 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5919 if (ret) 5920 return ret; 5921 5922 if (crtc_mask == 0) 5923 return 0; 5924 5925 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5926 encoder->base.base.id, encoder->base.name); 5927 5928 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5929 const struct intel_crtc_state *crtc_state = 5930 to_intel_crtc_state(crtc->base.state); 5931 5932 /* Suppress underruns caused 
by re-training */ 5933 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5934 if (crtc_state->has_pch_encoder) 5935 intel_set_pch_fifo_underrun_reporting(dev_priv, 5936 intel_crtc_pch_transcoder(crtc), false); 5937 } 5938 5939 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5940 const struct intel_crtc_state *crtc_state = 5941 to_intel_crtc_state(crtc->base.state); 5942 5943 /* retrain on the MST master transcoder */ 5944 if (INTEL_GEN(dev_priv) >= 12 && 5945 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 5946 !intel_dp_mst_is_master_trans(crtc_state)) 5947 continue; 5948 5949 intel_dp_start_link_train(intel_dp, crtc_state); 5950 intel_dp_stop_link_train(intel_dp, crtc_state); 5951 break; 5952 } 5953 5954 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5955 const struct intel_crtc_state *crtc_state = 5956 to_intel_crtc_state(crtc->base.state); 5957 5958 /* Keep underrun reporting disabled until things are stable */ 5959 intel_wait_for_vblank(dev_priv, crtc->pipe); 5960 5961 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5962 if (crtc_state->has_pch_encoder) 5963 intel_set_pch_fifo_underrun_reporting(dev_priv, 5964 intel_crtc_pch_transcoder(crtc), true); 5965 } 5966 5967 return 0; 5968 } 5969 5970 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, 5971 struct drm_modeset_acquire_ctx *ctx, 5972 u32 *crtc_mask) 5973 { 5974 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5975 struct drm_connector_list_iter conn_iter; 5976 struct intel_connector *connector; 5977 int ret = 0; 5978 5979 *crtc_mask = 0; 5980 5981 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5982 for_each_intel_connector_iter(connector, &conn_iter) { 5983 struct drm_connector_state *conn_state = 5984 connector->base.state; 5985 struct intel_crtc_state *crtc_state; 5986 struct intel_crtc *crtc; 5987 5988 if (!intel_dp_has_connector(intel_dp, conn_state)) 5989 continue; 5990 5991 crtc = to_intel_crtc(conn_state->crtc); 5992 if (!crtc) 5993 continue; 5994 5995 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5996 if (ret) 5997 break; 5998 5999 crtc_state = to_intel_crtc_state(crtc->base.state); 6000 6001 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 6002 6003 if (!crtc_state->hw.active) 6004 continue; 6005 6006 if (conn_state->commit && 6007 !try_wait_for_completion(&conn_state->commit->hw_done)) 6008 continue; 6009 6010 *crtc_mask |= drm_crtc_mask(&crtc->base); 6011 } 6012 drm_connector_list_iter_end(&conn_iter); 6013 6014 return ret; 6015 } 6016 6017 static int intel_dp_do_phy_test(struct intel_encoder *encoder, 6018 struct drm_modeset_acquire_ctx *ctx) 6019 { 6020 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6021 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 6022 struct intel_crtc *crtc; 6023 u32 crtc_mask; 6024 int ret; 6025 6026 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 6027 ctx); 6028 if (ret) 6029 return ret; 6030 6031 ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask); 6032 if (ret) 6033 return ret; 6034 6035 if (crtc_mask == 0) 6036 return 0; 6037 6038 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", 6039 encoder->base.base.id, encoder->base.name); 6040 6041 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 6042 const struct intel_crtc_state *crtc_state = 6043 to_intel_crtc_state(crtc->base.state); 6044 6045 /* test on the MST master transcoder */ 6046 if (INTEL_GEN(dev_priv) >= 12 && 6047 intel_crtc_has_type(crtc_state, 
INTEL_OUTPUT_DP_MST) && 6048 !intel_dp_mst_is_master_trans(crtc_state)) 6049 continue; 6050 6051 intel_dp_process_phy_request(intel_dp, crtc_state); 6052 break; 6053 } 6054 6055 return 0; 6056 } 6057 6058 static void intel_dp_phy_test(struct intel_encoder *encoder) 6059 { 6060 struct drm_modeset_acquire_ctx ctx; 6061 int ret; 6062 6063 drm_modeset_acquire_init(&ctx, 0); 6064 6065 for (;;) { 6066 ret = intel_dp_do_phy_test(encoder, &ctx); 6067 6068 if (ret == -EDEADLK) { 6069 drm_modeset_backoff(&ctx); 6070 continue; 6071 } 6072 6073 break; 6074 } 6075 6076 drm_modeset_drop_locks(&ctx); 6077 drm_modeset_acquire_fini(&ctx); 6078 drm_WARN(encoder->base.dev, ret, 6079 "Acquiring modeset locks failed with %i\n", ret); 6080 } 6081 6082 /* 6083 * If the display is now connected, check the link status; 6084 * there have been known issues of link loss triggering a 6085 * long pulse. 6086 * 6087 * Some sinks (e.g. ASUS PB287Q) seem to perform some 6088 * weird HPD ping pong during modesets. So we can apparently 6089 * end up with HPD going low during a modeset, and then 6090 * going back up soon after. And once that happens we must 6091 * retrain the link to get a picture. That's in case no 6092 * userspace component reacted to the intermittent HPD dip. 6093 */ 6094 static enum intel_hotplug_state 6095 intel_dp_hotplug(struct intel_encoder *encoder, 6096 struct intel_connector *connector) 6097 { 6098 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 6099 struct drm_modeset_acquire_ctx ctx; 6100 enum intel_hotplug_state state; 6101 int ret; 6102 6103 if (intel_dp->compliance.test_active && 6104 intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) { 6105 intel_dp_phy_test(encoder); 6106 /* just do the PHY test and nothing else */ 6107 return INTEL_HOTPLUG_UNCHANGED; 6108 } 6109 6110 state = intel_encoder_hotplug(encoder, connector); 6111 6112 drm_modeset_acquire_init(&ctx, 0); 6113 6114 for (;;) { 6115 ret = intel_dp_retrain_link(encoder, &ctx); 6116 6117 if (ret == -EDEADLK) { 6118 drm_modeset_backoff(&ctx); 6119 continue; 6120 } 6121 6122 break; 6123 } 6124 6125 drm_modeset_drop_locks(&ctx); 6126 drm_modeset_acquire_fini(&ctx); 6127 drm_WARN(encoder->base.dev, ret, 6128 "Acquiring modeset locks failed with %i\n", ret); 6129 6130 /* 6131 * Keeping it consistent with intel_ddi_hotplug() and 6132 * intel_hdmi_hotplug(). 6133 */ 6134 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) 6135 state = INTEL_HOTPLUG_RETRY; 6136 6137 return state; 6138 } 6139 6140 static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 6141 { 6142 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6143 u8 val; 6144 6145 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 6146 return; 6147 6148 if (drm_dp_dpcd_readb(&intel_dp->aux, 6149 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 6150 return; 6151 6152 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 6153 6154 if (val & DP_AUTOMATED_TEST_REQUEST) 6155 intel_dp_handle_test_request(intel_dp); 6156 6157 if (val & DP_CP_IRQ) 6158 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 6159 6160 if (val & DP_SINK_SPECIFIC_IRQ) 6161 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 6162 } 6163 6164 /* 6165 * According to DP spec 6166 * 5.1.2: 6167 * 1. Read DPCD 6168 * 2. Configure link according to Receiver Capabilities 6169 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 6170 * 4.
Check link status on receipt of hot-plug interrupt 6171 * 6172 * intel_dp_short_pulse - handles short pulse interrupts 6173 * when full detection is not required. 6174 * Returns %true if short pulse is handled and full detection 6175 * is NOT required and %false otherwise. 6176 */ 6177 static bool 6178 intel_dp_short_pulse(struct intel_dp *intel_dp) 6179 { 6180 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6181 u8 old_sink_count = intel_dp->sink_count; 6182 bool ret; 6183 6184 /* 6185 * Clearing compliance test variables to allow capturing 6186 * of values for next automated test request. 6187 */ 6188 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6189 6190 /* 6191 * Now read the DPCD to see if it's actually running 6192 * If the current value of sink count doesn't match with 6193 * the value that was stored earlier or dpcd read failed 6194 * we need to do full detection 6195 */ 6196 ret = intel_dp_get_dpcd(intel_dp); 6197 6198 if ((old_sink_count != intel_dp->sink_count) || !ret) { 6199 /* No need to proceed if we are going to do full detect */ 6200 return false; 6201 } 6202 6203 intel_dp_check_service_irq(intel_dp); 6204 6205 /* Handle CEC interrupts, if any */ 6206 drm_dp_cec_irq(&intel_dp->aux); 6207 6208 /* defer to the hotplug work for link retraining if needed */ 6209 if (intel_dp_needs_link_retrain(intel_dp)) 6210 return false; 6211 6212 intel_psr_short_pulse(intel_dp); 6213 6214 switch (intel_dp->compliance.test_type) { 6215 case DP_TEST_LINK_TRAINING: 6216 drm_dbg_kms(&dev_priv->drm, 6217 "Link Training Compliance Test requested\n"); 6218 /* Send a Hotplug Uevent to userspace to start modeset */ 6219 drm_kms_helper_hotplug_event(&dev_priv->drm); 6220 break; 6221 case DP_TEST_LINK_PHY_TEST_PATTERN: 6222 drm_dbg_kms(&dev_priv->drm, 6223 "PHY test pattern Compliance Test requested\n"); 6224 /* 6225 * Schedule long hpd to do the test 6226 * 6227 * FIXME get rid of the ad-hoc phy test modeset code 6228 * and properly incorporate it into the normal modeset. 6229 */ 6230 return false; 6231 } 6232 6233 return true; 6234 } 6235 6236 /* XXX this is probably wrong for multiple downstream ports */ 6237 static enum drm_connector_status 6238 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 6239 { 6240 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6241 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6242 u8 *dpcd = intel_dp->dpcd; 6243 u8 type; 6244 6245 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) 6246 return connector_status_connected; 6247 6248 lspcon_resume(dig_port); 6249 6250 if (!intel_dp_get_dpcd(intel_dp)) 6251 return connector_status_disconnected; 6252 6253 /* if there's no downstream port, we're done */ 6254 if (!drm_dp_is_branch(dpcd)) 6255 return connector_status_connected; 6256 6257 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 6258 if (intel_dp_has_sink_count(intel_dp) && 6259 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 6260 return intel_dp->sink_count ? 
6261 connector_status_connected : connector_status_disconnected; 6262 } 6263 6264 if (intel_dp_can_mst(intel_dp)) 6265 return connector_status_connected; 6266 6267 /* If no HPD, poke DDC gently */ 6268 if (drm_probe_ddc(&intel_dp->aux.ddc)) 6269 return connector_status_connected; 6270 6271 /* Well we tried, say unknown for unreliable port types */ 6272 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 6273 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 6274 if (type == DP_DS_PORT_TYPE_VGA || 6275 type == DP_DS_PORT_TYPE_NON_EDID) 6276 return connector_status_unknown; 6277 } else { 6278 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 6279 DP_DWN_STRM_PORT_TYPE_MASK; 6280 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 6281 type == DP_DWN_STRM_PORT_TYPE_OTHER) 6282 return connector_status_unknown; 6283 } 6284 6285 /* Anything else is out of spec, warn and ignore */ 6286 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 6287 return connector_status_disconnected; 6288 } 6289 6290 static enum drm_connector_status 6291 edp_detect(struct intel_dp *intel_dp) 6292 { 6293 return connector_status_connected; 6294 } 6295 6296 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 6297 { 6298 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6299 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; 6300 6301 return intel_de_read(dev_priv, SDEISR) & bit; 6302 } 6303 6304 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 6305 { 6306 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6307 u32 bit; 6308 6309 switch (encoder->hpd_pin) { 6310 case HPD_PORT_B: 6311 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 6312 break; 6313 case HPD_PORT_C: 6314 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 6315 break; 6316 case HPD_PORT_D: 6317 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 6318 break; 6319 default: 6320 MISSING_CASE(encoder->hpd_pin); 6321 return false; 6322 } 6323 6324 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6325 } 6326 6327 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 6328 { 6329 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6330 u32 bit; 6331 6332 switch (encoder->hpd_pin) { 6333 case HPD_PORT_B: 6334 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 6335 break; 6336 case HPD_PORT_C: 6337 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 6338 break; 6339 case HPD_PORT_D: 6340 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 6341 break; 6342 default: 6343 MISSING_CASE(encoder->hpd_pin); 6344 return false; 6345 } 6346 6347 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6348 } 6349 6350 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 6351 { 6352 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6353 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; 6354 6355 return intel_de_read(dev_priv, DEISR) & bit; 6356 } 6357 6358 /* 6359 * intel_digital_port_connected - is the specified port connected? 6360 * @encoder: intel_encoder 6361 * 6362 * In cases where there's a connector physically connected but it can't be used 6363 * by our hardware we also return false, since the rest of the driver should 6364 * pretty much treat the port as disconnected. This is relevant for type-C 6365 * (starting on ICL) where there's ownership involved. 6366 * 6367 * Return %true if port is connected, %false otherwise. 
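 * The underlying ->connected() hook is called with a DISPLAY_CORE power
 * domain reference held, see with_intel_display_power() below.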
6368 */ 6369 bool intel_digital_port_connected(struct intel_encoder *encoder) 6370 { 6371 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6372 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6373 bool is_connected = false; 6374 intel_wakeref_t wakeref; 6375 6376 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6377 is_connected = dig_port->connected(encoder); 6378 6379 return is_connected; 6380 } 6381 6382 static struct edid * 6383 intel_dp_get_edid(struct intel_dp *intel_dp) 6384 { 6385 struct intel_connector *intel_connector = intel_dp->attached_connector; 6386 6387 /* use cached edid if we have one */ 6388 if (intel_connector->edid) { 6389 /* invalid edid */ 6390 if (IS_ERR(intel_connector->edid)) 6391 return NULL; 6392 6393 return drm_edid_duplicate(intel_connector->edid); 6394 } else 6395 return drm_get_edid(&intel_connector->base, 6396 &intel_dp->aux.ddc); 6397 } 6398 6399 static void 6400 intel_dp_update_dfp(struct intel_dp *intel_dp, 6401 const struct edid *edid) 6402 { 6403 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6404 struct intel_connector *connector = intel_dp->attached_connector; 6405 6406 intel_dp->dfp.max_bpc = 6407 drm_dp_downstream_max_bpc(intel_dp->dpcd, 6408 intel_dp->downstream_ports, edid); 6409 6410 intel_dp->dfp.max_dotclock = 6411 drm_dp_downstream_max_dotclock(intel_dp->dpcd, 6412 intel_dp->downstream_ports); 6413 6414 intel_dp->dfp.min_tmds_clock = 6415 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, 6416 intel_dp->downstream_ports, 6417 edid); 6418 intel_dp->dfp.max_tmds_clock = 6419 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, 6420 intel_dp->downstream_ports, 6421 edid); 6422 6423 drm_dbg_kms(&i915->drm, 6424 "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n", 6425 connector->base.base.id, connector->base.name, 6426 intel_dp->dfp.max_bpc, 6427 intel_dp->dfp.max_dotclock, 6428 intel_dp->dfp.min_tmds_clock, 6429 intel_dp->dfp.max_tmds_clock); 6430 } 6431 6432 static void 6433 intel_dp_update_420(struct intel_dp *intel_dp) 6434 { 6435 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 6436 struct intel_connector *connector = intel_dp->attached_connector; 6437 bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420; 6438 6439 /* No YCbCr output support on gmch platforms */ 6440 if (HAS_GMCH(i915)) 6441 return; 6442 6443 /* 6444 * ILK doesn't seem capable of DP YCbCr output. The 6445 * displayed image is severly corrupted. SNB+ is fine. 6446 */ 6447 if (IS_GEN(i915, 5)) 6448 return; 6449 6450 is_branch = drm_dp_is_branch(intel_dp->dpcd); 6451 ycbcr_420_passthrough = 6452 drm_dp_downstream_420_passthrough(intel_dp->dpcd, 6453 intel_dp->downstream_ports); 6454 /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ 6455 ycbcr_444_to_420 = 6456 dp_to_dig_port(intel_dp)->lspcon.active || 6457 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, 6458 intel_dp->downstream_ports); 6459 6460 if (INTEL_GEN(i915) >= 11) { 6461 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ 6462 intel_dp->dfp.ycbcr_444_to_420 = 6463 ycbcr_444_to_420 && !ycbcr_420_passthrough; 6464 6465 connector->base.ycbcr_420_allowed = 6466 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; 6467 } else { 6468 /* 4:4:4->4:2:0 conversion is the only way */ 6469 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; 6470 6471 connector->base.ycbcr_420_allowed = ycbcr_444_to_420; 6472 } 6473 6474 drm_dbg_kms(&i915->drm, 6475 "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? 
%s, YCbCr 4:4:4->4:2:0 conversion? %s\n", 6476 connector->base.base.id, connector->base.name, 6477 yesno(connector->base.ycbcr_420_allowed), 6478 yesno(intel_dp->dfp.ycbcr_444_to_420)); 6479 } 6480 6481 static void 6482 intel_dp_set_edid(struct intel_dp *intel_dp) 6483 { 6484 struct intel_connector *connector = intel_dp->attached_connector; 6485 struct edid *edid; 6486 6487 intel_dp_unset_edid(intel_dp); 6488 edid = intel_dp_get_edid(intel_dp); 6489 connector->detect_edid = edid; 6490 6491 intel_dp_update_dfp(intel_dp, edid); 6492 intel_dp_update_420(intel_dp); 6493 6494 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 6495 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 6496 intel_dp->has_audio = drm_detect_monitor_audio(edid); 6497 } 6498 6499 drm_dp_cec_set_edid(&intel_dp->aux, edid); 6500 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 6501 } 6502 6503 static void 6504 intel_dp_unset_edid(struct intel_dp *intel_dp) 6505 { 6506 struct intel_connector *connector = intel_dp->attached_connector; 6507 6508 drm_dp_cec_unset_edid(&intel_dp->aux); 6509 kfree(connector->detect_edid); 6510 connector->detect_edid = NULL; 6511 6512 intel_dp->has_hdmi_sink = false; 6513 intel_dp->has_audio = false; 6514 intel_dp->edid_quirks = 0; 6515 6516 intel_dp->dfp.max_bpc = 0; 6517 intel_dp->dfp.max_dotclock = 0; 6518 intel_dp->dfp.min_tmds_clock = 0; 6519 intel_dp->dfp.max_tmds_clock = 0; 6520 6521 intel_dp->dfp.ycbcr_444_to_420 = false; 6522 connector->base.ycbcr_420_allowed = false; 6523 } 6524 6525 static int 6526 intel_dp_detect(struct drm_connector *connector, 6527 struct drm_modeset_acquire_ctx *ctx, 6528 bool force) 6529 { 6530 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6531 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6532 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6533 struct intel_encoder *encoder = &dig_port->base; 6534 enum drm_connector_status status; 6535 6536 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6537 connector->base.id, connector->name); 6538 drm_WARN_ON(&dev_priv->drm, 6539 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 6540 6541 if (!INTEL_DISPLAY_ENABLED(dev_priv)) 6542 return connector_status_disconnected; 6543 6544 /* Can't disconnect eDP */ 6545 if (intel_dp_is_edp(intel_dp)) 6546 status = edp_detect(intel_dp); 6547 else if (intel_digital_port_connected(encoder)) 6548 status = intel_dp_detect_dpcd(intel_dp); 6549 else 6550 status = connector_status_disconnected; 6551 6552 if (status == connector_status_disconnected) { 6553 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6554 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 6555 6556 if (intel_dp->is_mst) { 6557 drm_dbg_kms(&dev_priv->drm, 6558 "MST device may have disappeared %d vs %d\n", 6559 intel_dp->is_mst, 6560 intel_dp->mst_mgr.mst_state); 6561 intel_dp->is_mst = false; 6562 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6563 intel_dp->is_mst); 6564 } 6565 6566 goto out; 6567 } 6568 6569 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 6570 if (INTEL_GEN(dev_priv) >= 11) 6571 intel_dp_get_dsc_sink_cap(intel_dp); 6572 6573 intel_dp_configure_mst(intel_dp); 6574 6575 /* 6576 * TODO: Reset link params when switching to MST mode, until MST 6577 * supports link training fallback params. 
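 * (Until then, being in MST mode simply resets the max link rate and
 * lane count from the common caps on every detect, as done below.)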
6578 */ 6579 if (intel_dp->reset_link_params || intel_dp->is_mst) { 6580 /* Initial max link lane count */ 6581 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 6582 6583 /* Initial max link rate */ 6584 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 6585 6586 intel_dp->reset_link_params = false; 6587 } 6588 6589 intel_dp_print_rates(intel_dp); 6590 6591 if (intel_dp->is_mst) { 6592 /* 6593 * If we are in MST mode then this connector 6594 * won't appear connected or have anything 6595 * with EDID on it 6596 */ 6597 status = connector_status_disconnected; 6598 goto out; 6599 } 6600 6601 /* 6602 * Some external monitors do not signal loss of link synchronization 6603 * with an IRQ_HPD, so force a link status check. 6604 */ 6605 if (!intel_dp_is_edp(intel_dp)) { 6606 int ret; 6607 6608 ret = intel_dp_retrain_link(encoder, ctx); 6609 if (ret) 6610 return ret; 6611 } 6612 6613 /* 6614 * Clearing NACK and defer counts to get their exact values 6615 * while reading EDID which are required by Compliance tests 6616 * 4.2.2.4 and 4.2.2.5 6617 */ 6618 intel_dp->aux.i2c_nack_count = 0; 6619 intel_dp->aux.i2c_defer_count = 0; 6620 6621 intel_dp_set_edid(intel_dp); 6622 if (intel_dp_is_edp(intel_dp) || 6623 to_intel_connector(connector)->detect_edid) 6624 status = connector_status_connected; 6625 6626 intel_dp_check_service_irq(intel_dp); 6627 6628 out: 6629 if (status != connector_status_connected && !intel_dp->is_mst) 6630 intel_dp_unset_edid(intel_dp); 6631 6632 /* 6633 * Make sure the refs for power wells enabled during detect are 6634 * dropped to avoid a new detect cycle triggered by HPD polling. 6635 */ 6636 intel_display_power_flush_work(dev_priv); 6637 6638 if (!intel_dp_is_edp(intel_dp)) 6639 drm_dp_set_subconnector_property(connector, 6640 status, 6641 intel_dp->dpcd, 6642 intel_dp->downstream_ports); 6643 return status; 6644 } 6645 6646 static void 6647 intel_dp_force(struct drm_connector *connector) 6648 { 6649 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6650 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6651 struct intel_encoder *intel_encoder = &dig_port->base; 6652 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6653 enum intel_display_power_domain aux_domain = 6654 intel_aux_power_domain(dig_port); 6655 intel_wakeref_t wakeref; 6656 6657 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6658 connector->base.id, connector->name); 6659 intel_dp_unset_edid(intel_dp); 6660 6661 if (connector->status != connector_status_connected) 6662 return; 6663 6664 wakeref = intel_display_power_get(dev_priv, aux_domain); 6665 6666 intel_dp_set_edid(intel_dp); 6667 6668 intel_display_power_put(dev_priv, aux_domain, wakeref); 6669 } 6670 6671 static int intel_dp_get_modes(struct drm_connector *connector) 6672 { 6673 struct intel_connector *intel_connector = to_intel_connector(connector); 6674 struct edid *edid; 6675 6676 edid = intel_connector->detect_edid; 6677 if (edid) { 6678 int ret = intel_connector_update_modes(connector, edid); 6679 if (ret) 6680 return ret; 6681 } 6682 6683 /* if eDP has no EDID, fall back to fixed mode */ 6684 if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && 6685 intel_connector->panel.fixed_mode) { 6686 struct drm_display_mode *mode; 6687 6688 mode = drm_mode_duplicate(connector->dev, 6689 intel_connector->panel.fixed_mode); 6690 if (mode) { 6691 drm_mode_probed_add(connector, mode); 6692 return 1; 6693 } 6694 } 6695 6696 if (!edid) { 6697 struct intel_dp 
*intel_dp = intel_attached_dp(intel_connector); 6698 struct drm_display_mode *mode; 6699 6700 mode = drm_dp_downstream_mode(connector->dev, 6701 intel_dp->dpcd, 6702 intel_dp->downstream_ports); 6703 if (mode) { 6704 drm_mode_probed_add(connector, mode); 6705 return 1; 6706 } 6707 } 6708 6709 return 0; 6710 } 6711 6712 static int 6713 intel_dp_connector_register(struct drm_connector *connector) 6714 { 6715 struct drm_i915_private *i915 = to_i915(connector->dev); 6716 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6717 int ret; 6718 6719 ret = intel_connector_register(connector); 6720 if (ret) 6721 return ret; 6722 6723 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 6724 intel_dp->aux.name, connector->kdev->kobj.name); 6725 6726 intel_dp->aux.dev = connector->kdev; 6727 ret = drm_dp_aux_register(&intel_dp->aux); 6728 if (!ret) 6729 drm_dp_cec_register_connector(&intel_dp->aux, connector); 6730 return ret; 6731 } 6732 6733 static void 6734 intel_dp_connector_unregister(struct drm_connector *connector) 6735 { 6736 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6737 6738 drm_dp_cec_unregister_connector(&intel_dp->aux); 6739 drm_dp_aux_unregister(&intel_dp->aux); 6740 intel_connector_unregister(connector); 6741 } 6742 6743 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 6744 { 6745 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 6746 struct intel_dp *intel_dp = &dig_port->dp; 6747 6748 intel_dp_mst_encoder_cleanup(dig_port); 6749 if (intel_dp_is_edp(intel_dp)) { 6750 intel_wakeref_t wakeref; 6751 6752 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6753 /* 6754 * vdd might still be enabled due to the delayed vdd off. 6755 * Make sure vdd is actually turned off here. 6756 */ 6757 with_pps_lock(intel_dp, wakeref) 6758 edp_panel_vdd_off_sync(intel_dp); 6759 } 6760 6761 intel_dp_aux_fini(intel_dp); 6762 } 6763 6764 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 6765 { 6766 intel_dp_encoder_flush_work(encoder); 6767 6768 drm_encoder_cleanup(encoder); 6769 kfree(enc_to_dig_port(to_intel_encoder(encoder))); 6770 } 6771 6772 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 6773 { 6774 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 6775 intel_wakeref_t wakeref; 6776 6777 if (!intel_dp_is_edp(intel_dp)) 6778 return; 6779 6780 /* 6781 * vdd might still be enabled due to the delayed vdd off. 6782 * Make sure vdd is actually turned off here.
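 * Cancel the delayed vdd-off work first so it cannot race with the
 * synchronous turn-off done under the pps lock below.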
6783 */ 6784 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6785 with_pps_lock(intel_dp, wakeref) 6786 edp_panel_vdd_off_sync(intel_dp); 6787 } 6788 6789 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) 6790 { 6791 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 6792 intel_wakeref_t wakeref; 6793 6794 if (!intel_dp_is_edp(intel_dp)) 6795 return; 6796 6797 with_pps_lock(intel_dp, wakeref) 6798 wait_panel_power_cycle(intel_dp); 6799 } 6800 6801 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 6802 { 6803 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6804 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6805 6806 lockdep_assert_held(&dev_priv->pps_mutex); 6807 6808 if (!edp_have_panel_vdd(intel_dp)) 6809 return; 6810 6811 /* 6812 * The VDD bit needs a power domain reference, so if the bit is 6813 * already enabled when we boot or resume, grab this reference and 6814 * schedule a vdd off, so we don't hold on to the reference 6815 * indefinitely. 6816 */ 6817 drm_dbg_kms(&dev_priv->drm, 6818 "VDD left on by BIOS, adjusting state tracking\n"); 6819 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 6820 6821 edp_panel_vdd_schedule_off(intel_dp); 6822 } 6823 6824 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 6825 { 6826 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6827 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6828 enum pipe pipe; 6829 6830 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 6831 encoder->port, &pipe)) 6832 return pipe; 6833 6834 return INVALID_PIPE; 6835 } 6836 6837 void intel_dp_encoder_reset(struct drm_encoder *encoder) 6838 { 6839 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 6840 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 6841 intel_wakeref_t wakeref; 6842 6843 if (!HAS_DDI(dev_priv)) 6844 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 6845 6846 intel_dp->reset_link_params = true; 6847 6848 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 6849 !intel_dp_is_edp(intel_dp)) 6850 return; 6851 6852 with_pps_lock(intel_dp, wakeref) { 6853 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6854 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 6855 6856 if (intel_dp_is_edp(intel_dp)) { 6857 /* 6858 * Reinit the power sequencer, in case BIOS did 6859 * something nasty with it. 
6860 */ 6861 intel_dp_pps_init(intel_dp); 6862 intel_edp_panel_vdd_sanitize(intel_dp); 6863 } 6864 } 6865 } 6866 6867 static int intel_modeset_tile_group(struct intel_atomic_state *state, 6868 int tile_group_id) 6869 { 6870 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6871 struct drm_connector_list_iter conn_iter; 6872 struct drm_connector *connector; 6873 int ret = 0; 6874 6875 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 6876 drm_for_each_connector_iter(connector, &conn_iter) { 6877 struct drm_connector_state *conn_state; 6878 struct intel_crtc_state *crtc_state; 6879 struct intel_crtc *crtc; 6880 6881 if (!connector->has_tile || 6882 connector->tile_group->id != tile_group_id) 6883 continue; 6884 6885 conn_state = drm_atomic_get_connector_state(&state->base, 6886 connector); 6887 if (IS_ERR(conn_state)) { 6888 ret = PTR_ERR(conn_state); 6889 break; 6890 } 6891 6892 crtc = to_intel_crtc(conn_state->crtc); 6893 6894 if (!crtc) 6895 continue; 6896 6897 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6898 crtc_state->uapi.mode_changed = true; 6899 6900 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6901 if (ret) 6902 break; 6903 } 6904 drm_connector_list_iter_end(&conn_iter); 6905 6906 return ret; 6907 } 6908 6909 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 6910 { 6911 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6912 struct intel_crtc *crtc; 6913 6914 if (transcoders == 0) 6915 return 0; 6916 6917 for_each_intel_crtc(&dev_priv->drm, crtc) { 6918 struct intel_crtc_state *crtc_state; 6919 int ret; 6920 6921 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6922 if (IS_ERR(crtc_state)) 6923 return PTR_ERR(crtc_state); 6924 6925 if (!crtc_state->hw.enable) 6926 continue; 6927 6928 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 6929 continue; 6930 6931 crtc_state->uapi.mode_changed = true; 6932 6933 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6934 if (ret) 6935 return ret; 6936 6937 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6938 if (ret) 6939 return ret; 6940 6941 transcoders &= ~BIT(crtc_state->cpu_transcoder); 6942 } 6943 6944 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 6945 6946 return 0; 6947 } 6948 6949 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 6950 struct drm_connector *connector) 6951 { 6952 const struct drm_connector_state *old_conn_state = 6953 drm_atomic_get_old_connector_state(&state->base, connector); 6954 const struct intel_crtc_state *old_crtc_state; 6955 struct intel_crtc *crtc; 6956 u8 transcoders; 6957 6958 crtc = to_intel_crtc(old_conn_state->crtc); 6959 if (!crtc) 6960 return 0; 6961 6962 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6963 6964 if (!old_crtc_state->hw.active) 6965 return 0; 6966 6967 transcoders = old_crtc_state->sync_mode_slaves_mask; 6968 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 6969 transcoders |= BIT(old_crtc_state->master_transcoder); 6970 6971 return intel_modeset_affected_transcoders(state, 6972 transcoders); 6973 } 6974 6975 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 6976 struct drm_atomic_state *_state) 6977 { 6978 struct drm_i915_private *dev_priv = to_i915(conn->dev); 6979 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6980 int ret; 6981 6982 ret = intel_digital_connector_atomic_check(conn, &state->base); 6983 if (ret) 6984 return ret; 6985 
6986 /* 6987 * We don't enable port sync on BDW due to missing w/as and 6988 * due to not having adjusted the modeset sequence appropriately. 6989 */ 6990 if (INTEL_GEN(dev_priv) < 9) 6991 return 0; 6992 6993 if (!intel_connector_needs_modeset(state, conn)) 6994 return 0; 6995 6996 if (conn->has_tile) { 6997 ret = intel_modeset_tile_group(state, conn->tile_group->id); 6998 if (ret) 6999 return ret; 7000 } 7001 7002 return intel_modeset_synced_crtcs(state, conn); 7003 } 7004 7005 static const struct drm_connector_funcs intel_dp_connector_funcs = { 7006 .force = intel_dp_force, 7007 .fill_modes = drm_helper_probe_single_connector_modes, 7008 .atomic_get_property = intel_digital_connector_atomic_get_property, 7009 .atomic_set_property = intel_digital_connector_atomic_set_property, 7010 .late_register = intel_dp_connector_register, 7011 .early_unregister = intel_dp_connector_unregister, 7012 .destroy = intel_connector_destroy, 7013 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7014 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 7015 }; 7016 7017 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 7018 .detect_ctx = intel_dp_detect, 7019 .get_modes = intel_dp_get_modes, 7020 .mode_valid = intel_dp_mode_valid, 7021 .atomic_check = intel_dp_connector_atomic_check, 7022 }; 7023 7024 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 7025 .reset = intel_dp_encoder_reset, 7026 .destroy = intel_dp_encoder_destroy, 7027 }; 7028 7029 static bool intel_edp_have_power(struct intel_dp *intel_dp) 7030 { 7031 intel_wakeref_t wakeref; 7032 bool have_power = false; 7033 7034 with_pps_lock(intel_dp, wakeref) { 7035 have_power = edp_have_panel_power(intel_dp) && 7036 edp_have_panel_vdd(intel_dp); 7037 } 7038 7039 return have_power; 7040 } 7041 7042 enum irqreturn 7043 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 7044 { 7045 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 7046 struct intel_dp *intel_dp = &dig_port->dp; 7047 7048 if (dig_port->base.type == INTEL_OUTPUT_EDP && 7049 (long_hpd || !intel_edp_have_power(intel_dp))) { 7050 /* 7051 * vdd off can generate a long/short pulse on eDP which 7052 * would require vdd on to handle it, and thus we 7053 * would end up in an endless cycle of 7054 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 7055 */ 7056 drm_dbg_kms(&i915->drm, 7057 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 7058 long_hpd ? "long" : "short", 7059 dig_port->base.base.base.id, 7060 dig_port->base.base.name); 7061 return IRQ_HANDLED; 7062 } 7063 7064 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 7065 dig_port->base.base.base.id, 7066 dig_port->base.base.name, 7067 long_hpd ? "long" : "short"); 7068 7069 if (long_hpd) { 7070 intel_dp->reset_link_params = true; 7071 return IRQ_NONE; 7072 } 7073 7074 if (intel_dp->is_mst) { 7075 if (!intel_dp_check_mst_status(intel_dp)) 7076 return IRQ_NONE; 7077 } else if (!intel_dp_short_pulse(intel_dp)) { 7078 return IRQ_NONE; 7079 } 7080 7081 return IRQ_HANDLED; 7082 } 7083 7084 /* check the VBT to see whether the eDP is on another port */ 7085 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 7086 { 7087 /* 7088 * eDP not supported on g4x, so bail out early, just 7089 * for a bit of extra safety in case the VBT is bonkers.
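 * (In practice this means gen4 and earlier never report an eDP panel,
 * no matter what the VBT claims.)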
7090 */ 7091 if (INTEL_GEN(dev_priv) < 5) 7092 return false; 7093 7094 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 7095 return true; 7096 7097 return intel_bios_is_port_edp(dev_priv, port); 7098 } 7099 7100 static void 7101 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 7102 { 7103 struct drm_i915_private *dev_priv = to_i915(connector->dev); 7104 enum port port = dp_to_dig_port(intel_dp)->base.port; 7105 7106 if (!intel_dp_is_edp(intel_dp)) 7107 drm_connector_attach_dp_subconnector_property(connector); 7108 7109 if (!IS_G4X(dev_priv) && port != PORT_A) 7110 intel_attach_force_audio_property(connector); 7111 7112 intel_attach_broadcast_rgb_property(connector); 7113 if (HAS_GMCH(dev_priv)) 7114 drm_connector_attach_max_bpc_property(connector, 6, 10); 7115 else if (INTEL_GEN(dev_priv) >= 5) 7116 drm_connector_attach_max_bpc_property(connector, 6, 12); 7117 7118 intel_attach_colorspace_property(connector); 7119 7120 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 7121 drm_object_attach_property(&connector->base, 7122 connector->dev->mode_config.hdr_output_metadata_property, 7123 0); 7124 7125 if (intel_dp_is_edp(intel_dp)) { 7126 u32 allowed_scalers; 7127 7128 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 7129 if (!HAS_GMCH(dev_priv)) 7130 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 7131 7132 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 7133 7134 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 7135 7136 } 7137 } 7138 7139 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 7140 { 7141 intel_dp->panel_power_off_time = ktime_get_boottime(); 7142 intel_dp->last_power_on = jiffies; 7143 intel_dp->last_backlight_off = jiffies; 7144 } 7145 7146 static void 7147 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 7148 { 7149 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7150 u32 pp_on, pp_off, pp_ctl; 7151 struct pps_registers regs; 7152 7153 intel_pps_get_registers(intel_dp, ®s); 7154 7155 pp_ctl = ilk_get_pp_control(intel_dp); 7156 7157 /* Ensure PPS is unlocked */ 7158 if (!HAS_DDI(dev_priv)) 7159 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7160 7161 pp_on = intel_de_read(dev_priv, regs.pp_on); 7162 pp_off = intel_de_read(dev_priv, regs.pp_off); 7163 7164 /* Pull timing values out of registers */ 7165 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 7166 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 7167 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 7168 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 7169 7170 if (i915_mmio_reg_valid(regs.pp_div)) { 7171 u32 pp_div; 7172 7173 pp_div = intel_de_read(dev_priv, regs.pp_div); 7174 7175 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 7176 } else { 7177 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 7178 } 7179 } 7180 7181 static void 7182 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 7183 { 7184 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 7185 state_name, 7186 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 7187 } 7188 7189 static void 7190 intel_pps_verify_state(struct intel_dp *intel_dp) 7191 { 7192 struct edp_power_seq hw; 7193 struct edp_power_seq *sw = &intel_dp->pps_delays; 7194 7195 intel_pps_readout_hw_state(intel_dp, &hw); 7196 7197 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != 
sw->t9 || 7198 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 7199 DRM_ERROR("PPS state mismatch\n"); 7200 intel_pps_dump_state("sw", sw); 7201 intel_pps_dump_state("hw", &hw); 7202 } 7203 } 7204 7205 static void 7206 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 7207 { 7208 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7209 struct edp_power_seq cur, vbt, spec, 7210 *final = &intel_dp->pps_delays; 7211 7212 lockdep_assert_held(&dev_priv->pps_mutex); 7213 7214 /* already initialized? */ 7215 if (final->t11_t12 != 0) 7216 return; 7217 7218 intel_pps_readout_hw_state(intel_dp, &cur); 7219 7220 intel_pps_dump_state("cur", &cur); 7221 7222 vbt = dev_priv->vbt.edp.pps; 7223 /* On the Toshiba Satellite P50-C-18C the VBT T12 delay 7224 * of 500ms appears to be too short. Occasionally the panel 7225 * just fails to power back on. Increasing the delay to 800ms 7226 * seems sufficient to avoid this problem. 7227 */ 7228 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 7229 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 7230 drm_dbg_kms(&dev_priv->drm, 7231 "Increasing T12 panel delay as per the quirk to %d\n", 7232 vbt.t11_t12); 7233 } 7234 /* T11_T12 delay is special and actually in units of 100ms, but zero 7235 * based in the hw (so we need to add 100 ms). But the sw vbt 7236 * table multiplies it with 1000 to make it in units of 100usec, 7237 * too. */ 7238 vbt.t11_t12 += 100 * 10; 7239 7240 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 7241 * our hw here, which are all in 100usec. */ 7242 spec.t1_t3 = 210 * 10; 7243 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 7244 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 7245 spec.t10 = 500 * 10; 7246 /* This one is special and actually in units of 100ms, but zero 7247 * based in the hw (so we need to add 100 ms). But the sw vbt 7248 * table multiplies it with 1000 to make it in units of 100usec, 7249 * too. */ 7250 spec.t11_t12 = (510 + 100) * 10; 7251 7252 intel_pps_dump_state("vbt", &vbt); 7253 7254 /* Use the max of the register settings and vbt. If both are 7255 * unset, fall back to the spec limits. */ 7256 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ 7257 spec.field : \ 7258 max(cur.field, vbt.field)) 7259 assign_final(t1_t3); 7260 assign_final(t8); 7261 assign_final(t9); 7262 assign_final(t10); 7263 assign_final(t11_t12); 7264 #undef assign_final 7265 7266 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 7267 intel_dp->panel_power_up_delay = get_delay(t1_t3); 7268 intel_dp->backlight_on_delay = get_delay(t8); 7269 intel_dp->backlight_off_delay = get_delay(t9); 7270 intel_dp->panel_power_down_delay = get_delay(t10); 7271 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 7272 #undef get_delay 7273 7274 drm_dbg_kms(&dev_priv->drm, 7275 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 7276 intel_dp->panel_power_up_delay, 7277 intel_dp->panel_power_down_delay, 7278 intel_dp->panel_power_cycle_delay); 7279 7280 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", 7281 intel_dp->backlight_on_delay, 7282 intel_dp->backlight_off_delay); 7283 7284 /* 7285 * We override the HW backlight delays to 1 because we do manual waits 7286 * on them. For T8, even BSpec recommends doing it.

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}

static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
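	 *
	 * Worked example (values illustrative): with a 24000 kHz raw clock,
	 * div = 24, so the reference divider field gets
	 * (100 * 24) / 2 - 1 = 1199, and a 600 ms power cycle delay
	 * (t11_t12 = 6000 in 100 usec units) programs
	 * DIV_ROUND_UP(6000, 1000) = 6 into the power cycle delay field.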
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
	    refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled, CRTC not active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refresh rate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}
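
/*
 * Illustrative use (not a call site in this file): a media-playback path
 * that wants the panel's downclock rate while a video plays would, with
 * dev_priv->drrs.mutex held, do roughly
 *
 *	intel_dp_set_drrs_state(dev_priv, crtc_state,
 *				drm_mode_vrefresh(panel->downclock_mode));
 *
 * and restore drm_mode_vrefresh(panel->fixed_mode) afterwards, as
 * intel_edp_drrs_flush() below does.
 */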

static void
intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	dev_priv->drrs.busy_frontbuffer_bits = 0;
	dev_priv->drrs.dp = intel_dp;
}

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs)
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");

	mutex_lock(&dev_priv->drrs.mutex);

	if (dev_priv->drrs.dp) {
		drm_warn(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	intel_edp_drrs_enable_locked(intel_dp);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
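
/*
 * Must be called with dev_priv->drrs.mutex held. If we are currently
 * running at the low (downclock) refresh rate, switch back to the panel's
 * fixed mode rate before detaching drrs.dp.
 */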
static void
intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
		int refresh;

		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
	}

	dev_priv->drrs.dp = NULL;
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

/**
 * intel_edp_drrs_update - Update DRRS state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update DRRS states, disabling or enabling DRRS when
 * executing fastsets. For full modeset, intel_edp_drrs_disable() and
 * intel_edp_drrs_enable() should be called instead.
 */
void
intel_edp_drrs_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	mutex_lock(&dev_priv->drrs.mutex);

	/* New state matches current one? */
	if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
		goto unlock;

	if (crtc_state->has_drrs)
		intel_edp_drrs_enable_locked(intel_dp);
	else
		intel_edp_drrs_disable_locked(intel_dp, crtc_state);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
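
/*
 * Delayed work scheduled from intel_edp_drrs_flush(): if the screen has
 * stayed idle for the whole delay (no busy frontbuffer bits), drop the
 * panel to its downclock (low) refresh rate.
 */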
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */
	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on the frontbuffer tracking implementation.
 * When there is a disturbance on the screen triggered by user activity or a
 * periodic system activity, DRRS is disabled (RR is changed to high RR).
 * When there is no movement on screen, after a timeout of 1 second, a switch
 * to low RR is made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
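
/*
 * Minimal sketch of the frontbuffer-tracking integration described above
 * (illustrative only, not a call site in this file): the frontbuffer code
 * computes a bitmask for the touched planes and brackets CPU rendering
 * with
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... CPU draws to the frontbuffer ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * invalidate() upclocks immediately; flush() upclocks and re-arms the
 * 1 second idleness timer that eventually downclocks again.
 */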

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}
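
	/*
	 * Note on the error encoding below: intel_connector->edid keeps
	 * ERR_PTR(-ENOENT) ("no EDID read") or ERR_PTR(-EINVAL) ("EDID had
	 * no usable modes") rather than NULL, so later probe code can tell
	 * a cached failure apart from "no EDID cached" and skip re-reading.
	 */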
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
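
/*
 * Queued when link training fails: flags the link as bad and sends a
 * hotplug uevent so userspace can retry with a new modeset.
 */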
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}
"eDP" : "DP", 8040 intel_encoder->base.base.id, intel_encoder->base.name); 8041 8042 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 8043 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 8044 8045 if (!HAS_GMCH(dev_priv)) 8046 connector->interlace_allowed = true; 8047 connector->doublescan_allowed = 0; 8048 8049 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 8050 8051 intel_dp_aux_init(intel_dp); 8052 8053 intel_connector_attach_encoder(intel_connector, intel_encoder); 8054 8055 if (HAS_DDI(dev_priv)) 8056 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 8057 else 8058 intel_connector->get_hw_state = intel_connector_get_hw_state; 8059 8060 /* init MST on ports that can support it */ 8061 intel_dp_mst_encoder_init(dig_port, 8062 intel_connector->base.base.id); 8063 8064 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 8065 intel_dp_aux_fini(intel_dp); 8066 intel_dp_mst_encoder_cleanup(dig_port); 8067 goto fail; 8068 } 8069 8070 intel_dp_add_properties(intel_dp, connector); 8071 8072 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 8073 int ret = intel_dp_init_hdcp(dig_port, intel_connector); 8074 if (ret) 8075 drm_dbg_kms(&dev_priv->drm, 8076 "HDCP init failed, skipping.\n"); 8077 } 8078 8079 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 8080 * 0xd. Failure to do so will result in spurious interrupts being 8081 * generated on the port when a cable is not attached. 8082 */ 8083 if (IS_G45(dev_priv)) { 8084 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 8085 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 8086 (temp & ~0xf) | 0xd); 8087 } 8088 8089 return true; 8090 8091 fail: 8092 drm_connector_cleanup(connector); 8093 8094 return false; 8095 } 8096 8097 bool intel_dp_init(struct drm_i915_private *dev_priv, 8098 i915_reg_t output_reg, 8099 enum port port) 8100 { 8101 struct intel_digital_port *dig_port; 8102 struct intel_encoder *intel_encoder; 8103 struct drm_encoder *encoder; 8104 struct intel_connector *intel_connector; 8105 8106 dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); 8107 if (!dig_port) 8108 return false; 8109 8110 intel_connector = intel_connector_alloc(); 8111 if (!intel_connector) 8112 goto err_connector_alloc; 8113 8114 intel_encoder = &dig_port->base; 8115 encoder = &intel_encoder->base; 8116 8117 mutex_init(&dig_port->hdcp_mutex); 8118 8119 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 8120 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 8121 "DP %c", port_name(port))) 8122 goto err_encoder_init; 8123 8124 intel_encoder->hotplug = intel_dp_hotplug; 8125 intel_encoder->compute_config = intel_dp_compute_config; 8126 intel_encoder->get_hw_state = intel_dp_get_hw_state; 8127 intel_encoder->get_config = intel_dp_get_config; 8128 intel_encoder->sync_state = intel_dp_sync_state; 8129 intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check; 8130 intel_encoder->update_pipe = intel_panel_update_backlight; 8131 intel_encoder->suspend = intel_dp_encoder_suspend; 8132 intel_encoder->shutdown = intel_dp_encoder_shutdown; 8133 if (IS_CHERRYVIEW(dev_priv)) { 8134 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 8135 intel_encoder->pre_enable = chv_pre_enable_dp; 8136 intel_encoder->enable = vlv_enable_dp; 8137 intel_encoder->disable = vlv_disable_dp; 8138 intel_encoder->post_disable = chv_post_disable_dp; 8139 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 8140 } else if (IS_VALLEYVIEW(dev_priv)) { 8141 
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->sync_state = intel_dp_sync_state;
	intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	intel_encoder->shutdown = intel_dp_encoder_shutdown;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}
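
/*
 * MST topology managers need explicit suspend/resume handling; the two
 * helpers below walk every MST-capable DDI encoder and are called from
 * the driver's system suspend and resume paths.
 */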
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}