/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates.
 * The table below only provides the fixed rates and excludes the
 * variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single-pipe configuration, the HW can support a maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}
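
/*
 * Illustration (not part of the original file): a sink that advertises
 * DP_MAX_LINK_RATE = 0x14 (5.4 GHz / HBR2) maps to max_rate = 540000 kHz,
 * so the loop above fills sink_rates = { 162000, 270000, 540000 } and sets
 * num_sink_rates = 3, dropping 810000 because it exceeds the sink maximum.
 */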

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
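
/*
 * Worked example (illustrative, not part of the original file): a
 * 1920x1080@60 mode with a 148500 kHz pixel clock at 24 bpp needs
 * intel_dp_link_required(148500, 24) = 445500 kB/s, while an HBR2 link
 * (LS_Clk = 540000 kHz) on 4 lanes offers
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so the mode fits.
 */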

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
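
/*
 * Illustration (not part of the original file): with the 1/0.972261 FEC
 * overhead factor defined above, a 148500 kHz mode clock becomes
 * 148500 * 1000000 / 972261 ~= 152736 kHz of effective link occupancy.
 */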

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}
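
/*
 * Illustration (not part of the original file): a 3840-wide mode with a
 * 533250 kHz pixel clock is below DP_DSC_PEAK_PIXEL_RATE, so
 * min_slice_count = DIV_ROUND_UP(533250, 340000) = 2; the loop above then
 * returns the smallest entry of valid_dsc_slicecount[] >= 2 that the sink
 * supports (assuming the sink's maximum slice width is also wide enough).
 */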

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
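
/*
 * Illustration (not part of the original file): intel_dp_pack_aux() packs
 * big-endian into the 32-bit AUX data registers, so a two-byte message
 * { 0x12, 0x34 } becomes 0x12340000, and intel_dp_unpack_aux() reverses it.
 */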

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), intel_dig_port->base.base.base.id,
		     intel_dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    intel_dig_port->base.base.base.id,
			    intel_dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/* Reboot notifier handler to shutdown panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be
   tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
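
/*
 * Illustration (not part of the original file): assuming a 24000 kHz raw
 * clock, the dividers above return DIV_ROUND_CLOSEST(24000, 2000) = 12,
 * i.e. the reference clock is divided down to roughly the 2 MHz AUX
 * bit clock the hardware wants.
 */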

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
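
/*
 * Illustration (not part of the original file): a native AUX read of one
 * byte at DPCD address 0x00000 (DP_AUX_NATIVE_READ = 0x9) is encoded as
 * txbuf[] = { 0x90, 0x00, 0x00, 0x00 }: request in the high nibble of the
 * first byte, 20-bit address spread over the next bytes, length minus one
 * last.
 */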
1594 */ 1595 ret--; 1596 memcpy(msg->buffer, rxbuf + 1, ret); 1597 } 1598 break; 1599 1600 default: 1601 ret = -EINVAL; 1602 break; 1603 } 1604 1605 return ret; 1606 } 1607 1608 1609 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 1610 { 1611 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1612 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1613 enum aux_ch aux_ch = dig_port->aux_ch; 1614 1615 switch (aux_ch) { 1616 case AUX_CH_B: 1617 case AUX_CH_C: 1618 case AUX_CH_D: 1619 return DP_AUX_CH_CTL(aux_ch); 1620 default: 1621 MISSING_CASE(aux_ch); 1622 return DP_AUX_CH_CTL(AUX_CH_B); 1623 } 1624 } 1625 1626 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) 1627 { 1628 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1629 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1630 enum aux_ch aux_ch = dig_port->aux_ch; 1631 1632 switch (aux_ch) { 1633 case AUX_CH_B: 1634 case AUX_CH_C: 1635 case AUX_CH_D: 1636 return DP_AUX_CH_DATA(aux_ch, index); 1637 default: 1638 MISSING_CASE(aux_ch); 1639 return DP_AUX_CH_DATA(AUX_CH_B, index); 1640 } 1641 } 1642 1643 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 1644 { 1645 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1646 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1647 enum aux_ch aux_ch = dig_port->aux_ch; 1648 1649 switch (aux_ch) { 1650 case AUX_CH_A: 1651 return DP_AUX_CH_CTL(aux_ch); 1652 case AUX_CH_B: 1653 case AUX_CH_C: 1654 case AUX_CH_D: 1655 return PCH_DP_AUX_CH_CTL(aux_ch); 1656 default: 1657 MISSING_CASE(aux_ch); 1658 return DP_AUX_CH_CTL(AUX_CH_A); 1659 } 1660 } 1661 1662 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 1663 { 1664 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1665 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1666 enum aux_ch aux_ch = dig_port->aux_ch; 1667 1668 switch (aux_ch) { 1669 case AUX_CH_A: 1670 return DP_AUX_CH_DATA(aux_ch, index); 1671 case AUX_CH_B: 1672 case AUX_CH_C: 1673 case AUX_CH_D: 1674 return PCH_DP_AUX_CH_DATA(aux_ch, index); 1675 default: 1676 MISSING_CASE(aux_ch); 1677 return DP_AUX_CH_DATA(AUX_CH_A, index); 1678 } 1679 } 1680 1681 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 1682 { 1683 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1684 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1685 enum aux_ch aux_ch = dig_port->aux_ch; 1686 1687 switch (aux_ch) { 1688 case AUX_CH_A: 1689 case AUX_CH_B: 1690 case AUX_CH_C: 1691 case AUX_CH_D: 1692 case AUX_CH_E: 1693 case AUX_CH_F: 1694 case AUX_CH_G: 1695 return DP_AUX_CH_CTL(aux_ch); 1696 default: 1697 MISSING_CASE(aux_ch); 1698 return DP_AUX_CH_CTL(AUX_CH_A); 1699 } 1700 } 1701 1702 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 1703 { 1704 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1705 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1706 enum aux_ch aux_ch = dig_port->aux_ch; 1707 1708 switch (aux_ch) { 1709 case AUX_CH_A: 1710 case AUX_CH_B: 1711 case AUX_CH_C: 1712 case AUX_CH_D: 1713 case AUX_CH_E: 1714 case AUX_CH_F: 1715 case AUX_CH_G: 1716 return DP_AUX_CH_DATA(aux_ch, index); 1717 default: 1718 MISSING_CASE(aux_ch); 1719 return DP_AUX_CH_DATA(AUX_CH_A, index); 1720 } 1721 } 1722 1723 static void 1724 intel_dp_aux_fini(struct intel_dp *intel_dp) 1725 { 1726 kfree(intel_dp->aux.name); 1727 } 1728 1729 static void 1730 intel_dp_aux_init(struct intel_dp *intel_dp) 1731 { 
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
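
/*
 * Illustration (not part of the original file): for port_clock = 270000 on
 * a sink without eDP 1.4 rate select, drm_dp_link_rate_to_bw_code() yields
 * the HBR code 0x0a for *link_bw and *rate_select stays 0; with
 * use_rate_select set, *link_bw is 0 and *rate_select indexes sink_rates.
 */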
*/ 1952 void 1953 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1954 struct intel_crtc_state *pipe_config, 1955 struct link_config_limits *limits) 1956 { 1957 /* For DP Compliance we override the computed bpp for the pipe */ 1958 if (intel_dp->compliance.test_data.bpc != 0) { 1959 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1960 1961 limits->min_bpp = limits->max_bpp = bpp; 1962 pipe_config->dither_force_disable = bpp == 6 * 3; 1963 1964 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp); 1965 } 1966 1967 /* Use values requested by Compliance Test Request */ 1968 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1969 int index; 1970 1971 /* Validate the compliance test data since max values 1972 * might have changed due to link train fallback. 1973 */ 1974 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1975 intel_dp->compliance.test_lane_count)) { 1976 index = intel_dp_rate_index(intel_dp->common_rates, 1977 intel_dp->num_common_rates, 1978 intel_dp->compliance.test_link_rate); 1979 if (index >= 0) 1980 limits->min_clock = limits->max_clock = index; 1981 limits->min_lane_count = limits->max_lane_count = 1982 intel_dp->compliance.test_lane_count; 1983 } 1984 } 1985 } 1986 1987 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 1988 { 1989 /* 1990 * The bpp value is assumed to be for RGB. For YCbCr 4:2:0 output the 1991 * number of bits per pixel is half that of an RGB pixel, so halve it 1992 * here. 1993 */ 1994 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1995 bpp /= 2; 1996 1997 return bpp; 1998 } 1999 2000 /* Optimize link config in order: max bpp, min clock, min lanes */ 2001 static int 2002 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2003 struct intel_crtc_state *pipe_config, 2004 const struct link_config_limits *limits) 2005 { 2006 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2007 int bpp, clock, lane_count; 2008 int mode_rate, link_clock, link_avail; 2009 2010 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2011 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2012 2013 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2014 output_bpp); 2015 2016 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2017 for (lane_count = limits->min_lane_count; 2018 lane_count <= limits->max_lane_count; 2019 lane_count <<= 1) { 2020 link_clock = intel_dp->common_rates[clock]; 2021 link_avail = intel_dp_max_data_rate(link_clock, 2022 lane_count); 2023 2024 if (mode_rate <= link_avail) { 2025 pipe_config->lane_count = lane_count; 2026 pipe_config->pipe_bpp = bpp; 2027 pipe_config->port_clock = link_clock; 2028 2029 return 0; 2030 } 2031 } 2032 } 2033 } 2034 2035 return -EINVAL; 2036 } 2037 2038 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2039 { 2040 int i, num_bpc; 2041 u8 dsc_bpc[3] = {0}; 2042 2043 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2044 dsc_bpc); 2045 for (i = 0; i < num_bpc; i++) { 2046 if (dsc_max_bpc >= dsc_bpc[i]) 2047 return dsc_bpc[i] * 3; 2048 } 2049 2050 return 0; 2051 } 2052 2053 #define DSC_SUPPORTED_VERSION_MIN 1 2054 2055 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2056 struct intel_crtc_state *crtc_state) 2057 { 2058 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2059 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2060 u8 line_buf_depth; 2061 int ret; 2062 2063
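/*
 * Populate the platform-independent DSC parameters first, then overlay
 * the sink's capabilities from the DSC DPCD (version, line buffer depth,
 * block prediction support) before computing the rate control parameters.
 */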
ret = intel_dsc_compute_params(encoder, crtc_state); 2064 if (ret) 2065 return ret; 2066 2067 /* 2068 * Slice Height of 8 works for all currently available panels. So start 2069 * with that if pic_height is an integral multiple of 8. Eventually add 2070 * logic to try multiple slice heights. 2071 */ 2072 if (vdsc_cfg->pic_height % 8 == 0) 2073 vdsc_cfg->slice_height = 8; 2074 else if (vdsc_cfg->pic_height % 4 == 0) 2075 vdsc_cfg->slice_height = 4; 2076 else 2077 vdsc_cfg->slice_height = 2; 2078 2079 vdsc_cfg->dsc_version_major = 2080 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2081 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2082 vdsc_cfg->dsc_version_minor = 2083 min(DSC_SUPPORTED_VERSION_MIN, 2084 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2085 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2086 2087 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2088 DP_DSC_RGB; 2089 2090 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2091 if (!line_buf_depth) { 2092 DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); 2093 return -EINVAL; 2094 } 2095 2096 if (vdsc_cfg->dsc_version_minor == 2) 2097 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2098 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2099 else 2100 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2101 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2102 2103 vdsc_cfg->block_pred_enable = 2104 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2105 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2106 2107 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2108 } 2109 2110 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2111 struct intel_crtc_state *pipe_config, 2112 struct drm_connector_state *conn_state, 2113 struct link_config_limits *limits) 2114 { 2115 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2116 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2117 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2118 u8 dsc_max_bpc; 2119 int pipe_bpp; 2120 int ret; 2121 2122 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2123 intel_dp_supports_fec(intel_dp, pipe_config); 2124 2125 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2126 return -EINVAL; 2127 2128 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2129 if (INTEL_GEN(dev_priv) >= 12) 2130 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2131 else 2132 dsc_max_bpc = min_t(u8, 10, 2133 conn_state->max_requested_bpc); 2134 2135 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2136 2137 /* Min Input BPC for ICL+ is 8 */ 2138 if (pipe_bpp < 8 * 3) { 2139 drm_dbg_kms(&dev_priv->drm, 2140 "No DSC support for less than 8bpc\n"); 2141 return -EINVAL; 2142 } 2143 2144 /* 2145 * For now enable DSC for max bpp, max link rate, max lane count. 2146 * Optimize this later for the minimum possible link rate/lane count 2147 * with DSC enabled for the requested mode. 
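 *
 * Note: the maximum compressed bpp values queried from the sink below
 * are in 1/16 bpp units, hence the >> 4 before they are compared
 * against pipe_bpp.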
2148 */ 2149 pipe_config->pipe_bpp = pipe_bpp; 2150 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2151 pipe_config->lane_count = limits->max_lane_count; 2152 2153 if (intel_dp_is_edp(intel_dp)) { 2154 pipe_config->dsc.compressed_bpp = 2155 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2156 pipe_config->pipe_bpp); 2157 pipe_config->dsc.slice_count = 2158 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2159 true); 2160 } else { 2161 u16 dsc_max_output_bpp; 2162 u8 dsc_dp_slice_count; 2163 2164 dsc_max_output_bpp = 2165 intel_dp_dsc_get_output_bpp(dev_priv, 2166 pipe_config->port_clock, 2167 pipe_config->lane_count, 2168 adjusted_mode->crtc_clock, 2169 adjusted_mode->crtc_hdisplay); 2170 dsc_dp_slice_count = 2171 intel_dp_dsc_get_slice_count(intel_dp, 2172 adjusted_mode->crtc_clock, 2173 adjusted_mode->crtc_hdisplay); 2174 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2175 drm_dbg_kms(&dev_priv->drm, 2176 "Compressed BPP/Slice Count not supported\n"); 2177 return -EINVAL; 2178 } 2179 pipe_config->dsc.compressed_bpp = min_t(u16, 2180 dsc_max_output_bpp >> 4, 2181 pipe_config->pipe_bpp); 2182 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2183 } 2184 /* 2185 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2186 * is greater than the maximum Cdclock and if slice count is even 2187 * then we need to use 2 VDSC instances. 2188 */ 2189 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2190 if (pipe_config->dsc.slice_count > 1) { 2191 pipe_config->dsc.dsc_split = true; 2192 } else { 2193 drm_dbg_kms(&dev_priv->drm, 2194 "Cannot split stream to use 2 VDSC instances\n"); 2195 return -EINVAL; 2196 } 2197 } 2198 2199 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2200 if (ret < 0) { 2201 drm_dbg_kms(&dev_priv->drm, 2202 "Cannot compute valid DSC parameters for Input Bpp = %d " 2203 "Compressed BPP = %d\n", 2204 pipe_config->pipe_bpp, 2205 pipe_config->dsc.compressed_bpp); 2206 return ret; 2207 } 2208 2209 pipe_config->dsc.compression_enable = true; 2210 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2211 "Compressed Bpp = %d Slice Count = %d\n", 2212 pipe_config->pipe_bpp, 2213 pipe_config->dsc.compressed_bpp, 2214 pipe_config->dsc.slice_count); 2215 2216 return 0; 2217 } 2218 2219 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2220 { 2221 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2222 return 6 * 3; 2223 else 2224 return 8 * 3; 2225 } 2226 2227 static int 2228 intel_dp_compute_link_config(struct intel_encoder *encoder, 2229 struct intel_crtc_state *pipe_config, 2230 struct drm_connector_state *conn_state) 2231 { 2232 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2233 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2234 struct link_config_limits limits; 2235 int common_len; 2236 int ret; 2237 2238 common_len = intel_dp_common_len_rate_limit(intel_dp, 2239 intel_dp->max_link_rate); 2240 2241 /* No common link rates between source and sink */ 2242 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2243 2244 limits.min_clock = 0; 2245 limits.max_clock = common_len - 1; 2246 2247 limits.min_lane_count = 1; 2248 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2249 2250 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2251 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2252 2253 if (intel_dp_is_edp(intel_dp)) { 2254 /* 2255 * Use the maximum clock and number of lanes the eDP panel 2256 * advertizes being 
capable of. The panels are generally 2257 * designed to support only a single clock and lane 2258 * configuration, and typically these values correspond to the 2259 * native resolution of the panel. 2260 */ 2261 limits.min_lane_count = limits.max_lane_count; 2262 limits.min_clock = limits.max_clock; 2263 } 2264 2265 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2266 2267 DRM_DEBUG_KMS("DP link computation with max lane count %i " 2268 "max rate %d max bpp %d pixel clock %iKHz\n", 2269 limits.max_lane_count, 2270 intel_dp->common_rates[limits.max_clock], 2271 limits.max_bpp, adjusted_mode->crtc_clock); 2272 2273 /* 2274 * Optimize for slow and wide. This is the place to add alternative 2275 * optimization policy. 2276 */ 2277 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2278 2279 /* enable compression if the mode doesn't fit available BW */ 2280 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); 2281 if (ret || intel_dp->force_dsc_en) { 2282 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2283 conn_state, &limits); 2284 if (ret < 0) 2285 return ret; 2286 } 2287 2288 if (pipe_config->dsc.compression_enable) { 2289 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2290 pipe_config->lane_count, pipe_config->port_clock, 2291 pipe_config->pipe_bpp, 2292 pipe_config->dsc.compressed_bpp); 2293 2294 DRM_DEBUG_KMS("DP link rate required %i available %i\n", 2295 intel_dp_link_required(adjusted_mode->crtc_clock, 2296 pipe_config->dsc.compressed_bpp), 2297 intel_dp_max_data_rate(pipe_config->port_clock, 2298 pipe_config->lane_count)); 2299 } else { 2300 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", 2301 pipe_config->lane_count, pipe_config->port_clock, 2302 pipe_config->pipe_bpp); 2303 2304 DRM_DEBUG_KMS("DP link rate required %i available %i\n", 2305 intel_dp_link_required(adjusted_mode->crtc_clock, 2306 pipe_config->pipe_bpp), 2307 intel_dp_max_data_rate(pipe_config->port_clock, 2308 pipe_config->lane_count)); 2309 } 2310 return 0; 2311 } 2312 2313 static int 2314 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2315 struct drm_connector *connector, 2316 struct intel_crtc_state *crtc_state) 2317 { 2318 const struct drm_display_info *info = &connector->display_info; 2319 const struct drm_display_mode *adjusted_mode = 2320 &crtc_state->hw.adjusted_mode; 2321 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2322 int ret; 2323 2324 if (!drm_mode_is_420_only(info, adjusted_mode) || 2325 !intel_dp_get_colorimetry_status(intel_dp) || 2326 !connector->ycbcr_420_allowed) 2327 return 0; 2328 2329 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2330 2331 /* YCBCR 420 output conversion needs a scaler */ 2332 ret = skl_update_scaler_crtc(crtc_state); 2333 if (ret) { 2334 DRM_DEBUG_KMS("Scaler allocation for output failed\n"); 2335 return ret; 2336 } 2337 2338 intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN); 2339 2340 return 0; 2341 } 2342 2343 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2344 const struct drm_connector_state *conn_state) 2345 { 2346 const struct intel_digital_connector_state *intel_conn_state = 2347 to_intel_digital_connector_state(conn_state); 2348 const struct drm_display_mode *adjusted_mode = 2349 &crtc_state->hw.adjusted_mode; 2350 2351 /* 2352 * Our YCbCr output is always limited range. 
2353 * crtc_state->limited_color_range only applies to RGB, 2354 * and it must never be set for YCbCr or we risk setting 2355 * some conflicting bits in PIPECONF which will mess up 2356 * the colors on the monitor. 2357 */ 2358 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2359 return false; 2360 2361 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2362 /* 2363 * See: 2364 * CEA-861-E - 5.1 Default Encoding Parameters 2365 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2366 */ 2367 return crtc_state->pipe_bpp != 18 && 2368 drm_default_rgb_quant_range(adjusted_mode) == 2369 HDMI_QUANTIZATION_RANGE_LIMITED; 2370 } else { 2371 return intel_conn_state->broadcast_rgb == 2372 INTEL_BROADCAST_RGB_LIMITED; 2373 } 2374 } 2375 2376 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2377 enum port port) 2378 { 2379 if (IS_G4X(dev_priv)) 2380 return false; 2381 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2382 return false; 2383 2384 return true; 2385 } 2386 2387 int 2388 intel_dp_compute_config(struct intel_encoder *encoder, 2389 struct intel_crtc_state *pipe_config, 2390 struct drm_connector_state *conn_state) 2391 { 2392 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2393 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2394 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2395 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2396 enum port port = encoder->port; 2397 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); 2398 struct intel_connector *intel_connector = intel_dp->attached_connector; 2399 struct intel_digital_connector_state *intel_conn_state = 2400 to_intel_digital_connector_state(conn_state); 2401 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2402 DP_DPCD_QUIRK_CONSTANT_N); 2403 int ret = 0, output_bpp; 2404 2405 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2406 pipe_config->has_pch_encoder = true; 2407 2408 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2409 2410 if (lspcon->active) 2411 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2412 else 2413 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base, 2414 pipe_config); 2415 2416 if (ret) 2417 return ret; 2418 2419 pipe_config->has_drrs = false; 2420 if (!intel_dp_port_has_audio(dev_priv, port)) 2421 pipe_config->has_audio = false; 2422 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2423 pipe_config->has_audio = intel_dp->has_audio; 2424 else 2425 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2426 2427 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2428 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2429 adjusted_mode); 2430 2431 if (INTEL_GEN(dev_priv) >= 9) { 2432 ret = skl_update_scaler_crtc(pipe_config); 2433 if (ret) 2434 return ret; 2435 } 2436 2437 if (HAS_GMCH(dev_priv)) 2438 intel_gmch_panel_fitting(intel_crtc, pipe_config, 2439 conn_state->scaling_mode); 2440 else 2441 intel_pch_panel_fitting(intel_crtc, pipe_config, 2442 conn_state->scaling_mode); 2443 } 2444 2445 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2446 return -EINVAL; 2447 2448 if (HAS_GMCH(dev_priv) && 2449 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2450 return -EINVAL; 2451 2452 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2453 return -EINVAL; 2454 2455 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2456 return -EINVAL; 2457 2458 ret = 
intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2459 if (ret < 0) 2460 return ret; 2461 2462 pipe_config->limited_color_range = 2463 intel_dp_limited_color_range(pipe_config, conn_state); 2464 2465 if (pipe_config->dsc.compression_enable) 2466 output_bpp = pipe_config->dsc.compressed_bpp; 2467 else 2468 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2469 2470 intel_link_compute_m_n(output_bpp, 2471 pipe_config->lane_count, 2472 adjusted_mode->crtc_clock, 2473 pipe_config->port_clock, 2474 &pipe_config->dp_m_n, 2475 constant_n, pipe_config->fec_enable); 2476 2477 if (intel_connector->panel.downclock_mode != NULL && 2478 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2479 pipe_config->has_drrs = true; 2480 intel_link_compute_m_n(output_bpp, 2481 pipe_config->lane_count, 2482 intel_connector->panel.downclock_mode->clock, 2483 pipe_config->port_clock, 2484 &pipe_config->dp_m2_n2, 2485 constant_n, pipe_config->fec_enable); 2486 } 2487 2488 if (!HAS_DDI(dev_priv)) 2489 intel_dp_set_clock(encoder, pipe_config); 2490 2491 intel_psr_compute_config(intel_dp, pipe_config); 2492 2493 return 0; 2494 } 2495 2496 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2497 int link_rate, u8 lane_count, 2498 bool link_mst) 2499 { 2500 intel_dp->link_trained = false; 2501 intel_dp->link_rate = link_rate; 2502 intel_dp->lane_count = lane_count; 2503 intel_dp->link_mst = link_mst; 2504 } 2505 2506 static void intel_dp_prepare(struct intel_encoder *encoder, 2507 const struct intel_crtc_state *pipe_config) 2508 { 2509 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2510 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2511 enum port port = encoder->port; 2512 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2513 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2514 2515 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2516 pipe_config->lane_count, 2517 intel_crtc_has_type(pipe_config, 2518 INTEL_OUTPUT_DP_MST)); 2519 2520 /* 2521 * There are four kinds of DP registers: 2522 * 2523 * IBX PCH 2524 * SNB CPU 2525 * IVB CPU 2526 * CPT PCH 2527 * 2528 * IBX PCH and CPU are the same for almost everything, 2529 * except that the CPU DP PLL is configured in this 2530 * register 2531 * 2532 * CPT PCH is quite different, having many bits moved 2533 * to the TRANS_DP_CTL register instead. That 2534 * configuration happens (oddly) in ilk_pch_enable 2535 */ 2536 2537 /* Preserve the BIOS-computed detected bit. This is 2538 * supposed to be read-only. 
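 * Keep that bit and rebuild everything else in intel_dp->DP from
 * scratch below.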
2539 */ 2540 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2541 2542 /* Handle DP bits in common between all three register formats */ 2543 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2544 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2545 2546 /* Split out the IBX/CPU vs CPT settings */ 2547 2548 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2549 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2550 intel_dp->DP |= DP_SYNC_HS_HIGH; 2551 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2552 intel_dp->DP |= DP_SYNC_VS_HIGH; 2553 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2554 2555 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2556 intel_dp->DP |= DP_ENHANCED_FRAMING; 2557 2558 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2559 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2560 u32 trans_dp; 2561 2562 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2563 2564 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2565 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2566 trans_dp |= TRANS_DP_ENH_FRAMING; 2567 else 2568 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2569 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2570 } else { 2571 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2572 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2573 2574 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2575 intel_dp->DP |= DP_SYNC_HS_HIGH; 2576 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2577 intel_dp->DP |= DP_SYNC_VS_HIGH; 2578 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2579 2580 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2581 intel_dp->DP |= DP_ENHANCED_FRAMING; 2582 2583 if (IS_CHERRYVIEW(dev_priv)) 2584 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2585 else 2586 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2587 } 2588 } 2589 2590 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2591 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2592 2593 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2594 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2595 2596 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2597 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2598 2599 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2600 2601 static void wait_panel_status(struct intel_dp *intel_dp, 2602 u32 mask, 2603 u32 value) 2604 { 2605 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2606 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2607 2608 lockdep_assert_held(&dev_priv->pps_mutex); 2609 2610 intel_pps_verify_state(intel_dp); 2611 2612 pp_stat_reg = _pp_stat_reg(intel_dp); 2613 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2614 2615 drm_dbg_kms(&dev_priv->drm, 2616 "mask %08x value %08x status %08x control %08x\n", 2617 mask, value, 2618 intel_de_read(dev_priv, pp_stat_reg), 2619 intel_de_read(dev_priv, pp_ctrl_reg)); 2620 2621 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2622 mask, value, 5000)) 2623 drm_err(&dev_priv->drm, 2624 "Panel status timeout: status %08x control %08x\n", 2625 intel_de_read(dev_priv, pp_stat_reg), 2626 intel_de_read(dev_priv, pp_ctrl_reg)); 2627 2628 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2629 } 2630 2631 static void wait_panel_on(struct intel_dp *intel_dp) 2632 { 2633 DRM_DEBUG_KMS("Wait for panel power on\n"); 2634 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2635 } 2636 2637 static void wait_panel_off(struct intel_dp *intel_dp) 2638 { 2639 
DRM_DEBUG_KMS("Wait for panel power off time\n"); 2640 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2641 } 2642 2643 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2644 { 2645 ktime_t panel_power_on_time; 2646 s64 panel_power_off_duration; 2647 2648 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 2649 2650 /* take the difference of currrent time and panel power off time 2651 * and then make panel wait for t11_t12 if needed. */ 2652 panel_power_on_time = ktime_get_boottime(); 2653 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2654 2655 /* When we disable the VDD override bit last we have to do the manual 2656 * wait. */ 2657 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2658 wait_remaining_ms_from_jiffies(jiffies, 2659 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2660 2661 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2662 } 2663 2664 static void wait_backlight_on(struct intel_dp *intel_dp) 2665 { 2666 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2667 intel_dp->backlight_on_delay); 2668 } 2669 2670 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2671 { 2672 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2673 intel_dp->backlight_off_delay); 2674 } 2675 2676 /* Read the current pp_control value, unlocking the register if it 2677 * is locked 2678 */ 2679 2680 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2681 { 2682 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2683 u32 control; 2684 2685 lockdep_assert_held(&dev_priv->pps_mutex); 2686 2687 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 2688 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 2689 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2690 control &= ~PANEL_UNLOCK_MASK; 2691 control |= PANEL_UNLOCK_REGS; 2692 } 2693 return control; 2694 } 2695 2696 /* 2697 * Must be paired with edp_panel_vdd_off(). 2698 * Must hold pps_mutex around the whole on/off sequence. 2699 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
2700 */ 2701 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2702 { 2703 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2705 u32 pp; 2706 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2707 bool need_to_disable = !intel_dp->want_panel_vdd; 2708 2709 lockdep_assert_held(&dev_priv->pps_mutex); 2710 2711 if (!intel_dp_is_edp(intel_dp)) 2712 return false; 2713 2714 cancel_delayed_work(&intel_dp->panel_vdd_work); 2715 intel_dp->want_panel_vdd = true; 2716 2717 if (edp_have_panel_vdd(intel_dp)) 2718 return need_to_disable; 2719 2720 intel_display_power_get(dev_priv, 2721 intel_aux_power_domain(intel_dig_port)); 2722 2723 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 2724 intel_dig_port->base.base.base.id, 2725 intel_dig_port->base.base.name); 2726 2727 if (!edp_have_panel_power(intel_dp)) 2728 wait_panel_power_cycle(intel_dp); 2729 2730 pp = ilk_get_pp_control(intel_dp); 2731 pp |= EDP_FORCE_VDD; 2732 2733 pp_stat_reg = _pp_stat_reg(intel_dp); 2734 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2735 2736 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2737 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2738 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2739 intel_de_read(dev_priv, pp_stat_reg), 2740 intel_de_read(dev_priv, pp_ctrl_reg)); 2741 /* 2742 * If the panel wasn't on, delay before accessing aux channel 2743 */ 2744 if (!edp_have_panel_power(intel_dp)) { 2745 drm_dbg_kms(&dev_priv->drm, 2746 "[ENCODER:%d:%s] panel power wasn't enabled\n", 2747 intel_dig_port->base.base.base.id, 2748 intel_dig_port->base.base.name); 2749 msleep(intel_dp->panel_power_up_delay); 2750 } 2751 2752 return need_to_disable; 2753 } 2754 2755 /* 2756 * Must be paired with intel_edp_panel_vdd_off() or 2757 * intel_edp_panel_off(). 2758 * Nested calls to these functions are not allowed since 2759 * we drop the lock. Caller must use some higher level 2760 * locking to prevent nested calls from other threads. 
2761 */ 2762 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2763 { 2764 intel_wakeref_t wakeref; 2765 bool vdd; 2766 2767 if (!intel_dp_is_edp(intel_dp)) 2768 return; 2769 2770 vdd = false; 2771 with_pps_lock(intel_dp, wakeref) 2772 vdd = edp_panel_vdd_on(intel_dp); 2773 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2774 dp_to_dig_port(intel_dp)->base.base.base.id, 2775 dp_to_dig_port(intel_dp)->base.base.name); 2776 } 2777 2778 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2779 { 2780 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2781 struct intel_digital_port *intel_dig_port = 2782 dp_to_dig_port(intel_dp); 2783 u32 pp; 2784 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2785 2786 lockdep_assert_held(&dev_priv->pps_mutex); 2787 2788 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 2789 2790 if (!edp_have_panel_vdd(intel_dp)) 2791 return; 2792 2793 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 2794 intel_dig_port->base.base.base.id, 2795 intel_dig_port->base.base.name); 2796 2797 pp = ilk_get_pp_control(intel_dp); 2798 pp &= ~EDP_FORCE_VDD; 2799 2800 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2801 pp_stat_reg = _pp_stat_reg(intel_dp); 2802 2803 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2804 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2805 2806 /* Make sure sequencer is idle before allowing subsequent activity */ 2807 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2808 intel_de_read(dev_priv, pp_stat_reg), 2809 intel_de_read(dev_priv, pp_ctrl_reg)); 2810 2811 if ((pp & PANEL_POWER_ON) == 0) 2812 intel_dp->panel_power_off_time = ktime_get_boottime(); 2813 2814 intel_display_power_put_unchecked(dev_priv, 2815 intel_aux_power_domain(intel_dig_port)); 2816 } 2817 2818 static void edp_panel_vdd_work(struct work_struct *__work) 2819 { 2820 struct intel_dp *intel_dp = 2821 container_of(to_delayed_work(__work), 2822 struct intel_dp, panel_vdd_work); 2823 intel_wakeref_t wakeref; 2824 2825 with_pps_lock(intel_dp, wakeref) { 2826 if (!intel_dp->want_panel_vdd) 2827 edp_panel_vdd_off_sync(intel_dp); 2828 } 2829 } 2830 2831 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 2832 { 2833 unsigned long delay; 2834 2835 /* 2836 * Queue the timer to fire a long time from now (relative to the power 2837 * down delay) to keep the panel power up across a sequence of 2838 * operations. 2839 */ 2840 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 2841 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 2842 } 2843 2844 /* 2845 * Must be paired with edp_panel_vdd_on(). 2846 * Must hold pps_mutex around the whole on/off sequence. 2847 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
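 * The sync argument selects between dropping VDD immediately and
 * deferring it to the delayed panel_vdd_work.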
2848 */ 2849 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 2850 { 2851 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2852 2853 lockdep_assert_held(&dev_priv->pps_mutex); 2854 2855 if (!intel_dp_is_edp(intel_dp)) 2856 return; 2857 2858 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 2859 dp_to_dig_port(intel_dp)->base.base.base.id, 2860 dp_to_dig_port(intel_dp)->base.base.name); 2861 2862 intel_dp->want_panel_vdd = false; 2863 2864 if (sync) 2865 edp_panel_vdd_off_sync(intel_dp); 2866 else 2867 edp_panel_vdd_schedule_off(intel_dp); 2868 } 2869 2870 static void edp_panel_on(struct intel_dp *intel_dp) 2871 { 2872 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2873 u32 pp; 2874 i915_reg_t pp_ctrl_reg; 2875 2876 lockdep_assert_held(&dev_priv->pps_mutex); 2877 2878 if (!intel_dp_is_edp(intel_dp)) 2879 return; 2880 2881 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 2882 dp_to_dig_port(intel_dp)->base.base.base.id, 2883 dp_to_dig_port(intel_dp)->base.base.name); 2884 2885 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 2886 "[ENCODER:%d:%s] panel power already on\n", 2887 dp_to_dig_port(intel_dp)->base.base.base.id, 2888 dp_to_dig_port(intel_dp)->base.base.name)) 2889 return; 2890 2891 wait_panel_power_cycle(intel_dp); 2892 2893 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2894 pp = ilk_get_pp_control(intel_dp); 2895 if (IS_GEN(dev_priv, 5)) { 2896 /* ILK workaround: disable reset around power sequence */ 2897 pp &= ~PANEL_POWER_RESET; 2898 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2899 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2900 } 2901 2902 pp |= PANEL_POWER_ON; 2903 if (!IS_GEN(dev_priv, 5)) 2904 pp |= PANEL_POWER_RESET; 2905 2906 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2907 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2908 2909 wait_panel_on(intel_dp); 2910 intel_dp->last_power_on = jiffies; 2911 2912 if (IS_GEN(dev_priv, 5)) { 2913 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 2914 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2915 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2916 } 2917 } 2918 2919 void intel_edp_panel_on(struct intel_dp *intel_dp) 2920 { 2921 intel_wakeref_t wakeref; 2922 2923 if (!intel_dp_is_edp(intel_dp)) 2924 return; 2925 2926 with_pps_lock(intel_dp, wakeref) 2927 edp_panel_on(intel_dp); 2928 } 2929 2930 2931 static void edp_panel_off(struct intel_dp *intel_dp) 2932 { 2933 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2934 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2935 u32 pp; 2936 i915_reg_t pp_ctrl_reg; 2937 2938 lockdep_assert_held(&dev_priv->pps_mutex); 2939 2940 if (!intel_dp_is_edp(intel_dp)) 2941 return; 2942 2943 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 2944 dig_port->base.base.base.id, dig_port->base.base.name); 2945 2946 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 2947 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 2948 dig_port->base.base.base.id, dig_port->base.base.name); 2949 2950 pp = ilk_get_pp_control(intel_dp); 2951 /* We need to switch off panel power _and_ force vdd, for otherwise some 2952 * panels get very unhappy and cease to work. 
*/ 2953 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 2954 EDP_BLC_ENABLE); 2955 2956 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2957 2958 intel_dp->want_panel_vdd = false; 2959 2960 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2961 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2962 2963 wait_panel_off(intel_dp); 2964 intel_dp->panel_power_off_time = ktime_get_boottime(); 2965 2966 /* We got a reference when we enabled the VDD. */ 2967 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 2968 } 2969 2970 void intel_edp_panel_off(struct intel_dp *intel_dp) 2971 { 2972 intel_wakeref_t wakeref; 2973 2974 if (!intel_dp_is_edp(intel_dp)) 2975 return; 2976 2977 with_pps_lock(intel_dp, wakeref) 2978 edp_panel_off(intel_dp); 2979 } 2980 2981 /* Enable backlight in the panel power control. */ 2982 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 2983 { 2984 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2985 intel_wakeref_t wakeref; 2986 2987 /* 2988 * If we enable the backlight right away following a panel power 2989 * on, we may see slight flicker as the panel syncs with the eDP 2990 * link. So delay a bit to make sure the image is solid before 2991 * allowing it to appear. 2992 */ 2993 wait_backlight_on(intel_dp); 2994 2995 with_pps_lock(intel_dp, wakeref) { 2996 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2997 u32 pp; 2998 2999 pp = ilk_get_pp_control(intel_dp); 3000 pp |= EDP_BLC_ENABLE; 3001 3002 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3003 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3004 } 3005 } 3006 3007 /* Enable backlight PWM and backlight PP control. */ 3008 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3009 const struct drm_connector_state *conn_state) 3010 { 3011 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3012 3013 if (!intel_dp_is_edp(intel_dp)) 3014 return; 3015 3016 DRM_DEBUG_KMS("\n"); 3017 3018 intel_panel_enable_backlight(crtc_state, conn_state); 3019 _intel_edp_backlight_on(intel_dp); 3020 } 3021 3022 /* Disable backlight in the panel power control. */ 3023 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3024 { 3025 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3026 intel_wakeref_t wakeref; 3027 3028 if (!intel_dp_is_edp(intel_dp)) 3029 return; 3030 3031 with_pps_lock(intel_dp, wakeref) { 3032 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3033 u32 pp; 3034 3035 pp = ilk_get_pp_control(intel_dp); 3036 pp &= ~EDP_BLC_ENABLE; 3037 3038 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3039 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3040 } 3041 3042 intel_dp->last_backlight_off = jiffies; 3043 edp_wait_backlight_off(intel_dp); 3044 } 3045 3046 /* Disable backlight PP control and backlight PWM. */ 3047 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3048 { 3049 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3050 3051 if (!intel_dp_is_edp(intel_dp)) 3052 return; 3053 3054 DRM_DEBUG_KMS("\n"); 3055 3056 _intel_edp_backlight_off(intel_dp); 3057 intel_panel_disable_backlight(old_conn_state); 3058 } 3059 3060 /* 3061 * Hook for controlling the panel power control backlight through the bl_power 3062 * sysfs attribute. Take care to handle multiple calls. 
3063 */ 3064 static void intel_edp_backlight_power(struct intel_connector *connector, 3065 bool enable) 3066 { 3067 struct intel_dp *intel_dp = intel_attached_dp(connector); 3068 intel_wakeref_t wakeref; 3069 bool is_enabled; 3070 3071 is_enabled = false; 3072 with_pps_lock(intel_dp, wakeref) 3073 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3074 if (is_enabled == enable) 3075 return; 3076 3077 DRM_DEBUG_KMS("panel power control backlight %s\n", 3078 enable ? "enable" : "disable"); 3079 3080 if (enable) 3081 _intel_edp_backlight_on(intel_dp); 3082 else 3083 _intel_edp_backlight_off(intel_dp); 3084 } 3085 3086 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3087 { 3088 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3089 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3090 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3091 3092 I915_STATE_WARN(cur_state != state, 3093 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3094 dig_port->base.base.base.id, dig_port->base.base.name, 3095 onoff(state), onoff(cur_state)); 3096 } 3097 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3098 3099 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3100 { 3101 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3102 3103 I915_STATE_WARN(cur_state != state, 3104 "eDP PLL state assertion failure (expected %s, current %s)\n", 3105 onoff(state), onoff(cur_state)); 3106 } 3107 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3108 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3109 3110 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3111 const struct intel_crtc_state *pipe_config) 3112 { 3113 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3114 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3115 3116 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3117 assert_dp_port_disabled(intel_dp); 3118 assert_edp_pll_disabled(dev_priv); 3119 3120 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3121 pipe_config->port_clock); 3122 3123 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3124 3125 if (pipe_config->port_clock == 162000) 3126 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3127 else 3128 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3129 3130 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3131 intel_de_posting_read(dev_priv, DP_A); 3132 udelay(500); 3133 3134 /* 3135 * [DevILK] Work around required when enabling DP PLL 3136 * while a pipe is enabled going to FDI: 3137 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3138 * 2. 
Program DP PLL enable 3139 */ 3140 if (IS_GEN(dev_priv, 5)) 3141 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3142 3143 intel_dp->DP |= DP_PLL_ENABLE; 3144 3145 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3146 intel_de_posting_read(dev_priv, DP_A); 3147 udelay(200); 3148 } 3149 3150 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3151 const struct intel_crtc_state *old_crtc_state) 3152 { 3153 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3154 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3155 3156 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3157 assert_dp_port_disabled(intel_dp); 3158 assert_edp_pll_enabled(dev_priv); 3159 3160 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3161 3162 intel_dp->DP &= ~DP_PLL_ENABLE; 3163 3164 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3165 intel_de_posting_read(dev_priv, DP_A); 3166 udelay(200); 3167 } 3168 3169 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3170 { 3171 /* 3172 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3173 * be capable of signalling downstream hpd with a long pulse. 3174 * Whether or not that means D3 is safe to use is not clear, 3175 * but let's assume so until proven otherwise. 3176 * 3177 * FIXME should really check all downstream ports... 3178 */ 3179 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3180 drm_dp_is_branch(intel_dp->dpcd) && 3181 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3182 } 3183 3184 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3185 const struct intel_crtc_state *crtc_state, 3186 bool enable) 3187 { 3188 int ret; 3189 3190 if (!crtc_state->dsc.compression_enable) 3191 return; 3192 3193 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3194 enable ? DP_DECOMPRESSION_EN : 0); 3195 if (ret < 0) 3196 DRM_DEBUG_KMS("Failed to %s sink decompression state\n", 3197 enable ? "enable" : "disable"); 3198 } 3199 3200 /* If the sink supports it, try to set the power state appropriately */ 3201 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3202 { 3203 int ret, i; 3204 3205 /* Should have a valid DPCD by this point */ 3206 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3207 return; 3208 3209 if (mode != DRM_MODE_DPMS_ON) { 3210 if (downstream_hpd_needs_d0(intel_dp)) 3211 return; 3212 3213 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3214 DP_SET_POWER_D3); 3215 } else { 3216 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3217 3218 /* 3219 * When turning on, we need to retry for 1ms to give the sink 3220 * time to wake up. 3221 */ 3222 for (i = 0; i < 3; i++) { 3223 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3224 DP_SET_POWER_D0); 3225 if (ret == 1) 3226 break; 3227 msleep(1); 3228 } 3229 3230 if (ret == 1 && lspcon->active) 3231 lspcon_wait_pcon_mode(lspcon); 3232 } 3233 3234 if (ret != 1) 3235 DRM_DEBUG_KMS("failed to %s sink power state\n", 3236 mode == DRM_MODE_DPMS_ON ? 
"enable" : "disable"); 3237 } 3238 3239 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3240 enum port port, enum pipe *pipe) 3241 { 3242 enum pipe p; 3243 3244 for_each_pipe(dev_priv, p) { 3245 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3246 3247 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3248 *pipe = p; 3249 return true; 3250 } 3251 } 3252 3253 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3254 port_name(port)); 3255 3256 /* must initialize pipe to something for the asserts */ 3257 *pipe = PIPE_A; 3258 3259 return false; 3260 } 3261 3262 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3263 i915_reg_t dp_reg, enum port port, 3264 enum pipe *pipe) 3265 { 3266 bool ret; 3267 u32 val; 3268 3269 val = intel_de_read(dev_priv, dp_reg); 3270 3271 ret = val & DP_PORT_EN; 3272 3273 /* asserts want to know the pipe even if the port is disabled */ 3274 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3275 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3276 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3277 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3278 else if (IS_CHERRYVIEW(dev_priv)) 3279 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3280 else 3281 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3282 3283 return ret; 3284 } 3285 3286 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3287 enum pipe *pipe) 3288 { 3289 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3290 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3291 intel_wakeref_t wakeref; 3292 bool ret; 3293 3294 wakeref = intel_display_power_get_if_enabled(dev_priv, 3295 encoder->power_domain); 3296 if (!wakeref) 3297 return false; 3298 3299 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3300 encoder->port, pipe); 3301 3302 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3303 3304 return ret; 3305 } 3306 3307 static void intel_dp_get_config(struct intel_encoder *encoder, 3308 struct intel_crtc_state *pipe_config) 3309 { 3310 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3311 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3312 u32 tmp, flags = 0; 3313 enum port port = encoder->port; 3314 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3315 3316 if (encoder->type == INTEL_OUTPUT_EDP) 3317 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3318 else 3319 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3320 3321 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3322 3323 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3324 3325 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3326 u32 trans_dp = intel_de_read(dev_priv, 3327 TRANS_DP_CTL(crtc->pipe)); 3328 3329 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3330 flags |= DRM_MODE_FLAG_PHSYNC; 3331 else 3332 flags |= DRM_MODE_FLAG_NHSYNC; 3333 3334 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3335 flags |= DRM_MODE_FLAG_PVSYNC; 3336 else 3337 flags |= DRM_MODE_FLAG_NVSYNC; 3338 } else { 3339 if (tmp & DP_SYNC_HS_HIGH) 3340 flags |= DRM_MODE_FLAG_PHSYNC; 3341 else 3342 flags |= DRM_MODE_FLAG_NHSYNC; 3343 3344 if (tmp & DP_SYNC_VS_HIGH) 3345 flags |= DRM_MODE_FLAG_PVSYNC; 3346 else 3347 flags |= DRM_MODE_FLAG_NVSYNC; 3348 } 3349 3350 pipe_config->hw.adjusted_mode.flags |= flags; 3351 3352 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3353 pipe_config->limited_color_range = true; 3354 3355 pipe_config->lane_count = 3356 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3357 3358 intel_dp_get_m_n(crtc, pipe_config); 3359 3360 if (port == PORT_A) { 3361 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3362 pipe_config->port_clock = 162000; 3363 else 3364 pipe_config->port_clock = 270000; 3365 } 3366 3367 pipe_config->hw.adjusted_mode.crtc_clock = 3368 intel_dotclock_calculate(pipe_config->port_clock, 3369 &pipe_config->dp_m_n); 3370 3371 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3372 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3373 /* 3374 * This is a big fat ugly hack. 3375 * 3376 * Some machines in UEFI boot mode provide us a VBT that has 18 3377 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3378 * unknown we fail to light up. Yet the same BIOS boots up with 3379 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3380 * max, not what it tells us to use. 3381 * 3382 * Note: This will still be broken if the eDP panel is not lit 3383 * up by the BIOS, and thus we can't get the mode at module 3384 * load. 3385 */ 3386 drm_dbg_kms(&dev_priv->drm, 3387 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3388 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3389 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3390 } 3391 } 3392 3393 static void intel_disable_dp(struct intel_encoder *encoder, 3394 const struct intel_crtc_state *old_crtc_state, 3395 const struct drm_connector_state *old_conn_state) 3396 { 3397 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3398 3399 intel_dp->link_trained = false; 3400 3401 if (old_crtc_state->has_audio) 3402 intel_audio_codec_disable(encoder, 3403 old_crtc_state, old_conn_state); 3404 3405 /* Make sure the panel is off before trying to change the mode. But also 3406 * ensure that we have vdd while we switch off the panel. */ 3407 intel_edp_panel_vdd_on(intel_dp); 3408 intel_edp_backlight_off(old_conn_state); 3409 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3410 intel_edp_panel_off(intel_dp); 3411 } 3412 3413 static void g4x_disable_dp(struct intel_encoder *encoder, 3414 const struct intel_crtc_state *old_crtc_state, 3415 const struct drm_connector_state *old_conn_state) 3416 { 3417 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 3418 } 3419 3420 static void vlv_disable_dp(struct intel_encoder *encoder, 3421 const struct intel_crtc_state *old_crtc_state, 3422 const struct drm_connector_state *old_conn_state) 3423 { 3424 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 3425 } 3426 3427 static void g4x_post_disable_dp(struct intel_encoder *encoder, 3428 const struct intel_crtc_state *old_crtc_state, 3429 const struct drm_connector_state *old_conn_state) 3430 { 3431 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3432 enum port port = encoder->port; 3433 3434 /* 3435 * Bspec does not list a specific disable sequence for g4x DP. 3436 * Follow the ilk+ sequence (disable pipe before the port) for 3437 * g4x DP as it does not suffer from underruns like the normal 3438 * g4x modeset sequence (disable pipe after the port). 
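 *
 * The port itself is turned off in intel_dp_link_down() below; only
 * port A (ilk+ eDP) additionally has an eDP PLL to shut down.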
3439 */ 3440 intel_dp_link_down(encoder, old_crtc_state); 3441 3442 /* Only ilk+ has port A */ 3443 if (port == PORT_A) 3444 ilk_edp_pll_off(intel_dp, old_crtc_state); 3445 } 3446 3447 static void vlv_post_disable_dp(struct intel_encoder *encoder, 3448 const struct intel_crtc_state *old_crtc_state, 3449 const struct drm_connector_state *old_conn_state) 3450 { 3451 intel_dp_link_down(encoder, old_crtc_state); 3452 } 3453 3454 static void chv_post_disable_dp(struct intel_encoder *encoder, 3455 const struct intel_crtc_state *old_crtc_state, 3456 const struct drm_connector_state *old_conn_state) 3457 { 3458 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3459 3460 intel_dp_link_down(encoder, old_crtc_state); 3461 3462 vlv_dpio_get(dev_priv); 3463 3464 /* Assert data lane reset */ 3465 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3466 3467 vlv_dpio_put(dev_priv); 3468 } 3469 3470 static void 3471 _intel_dp_set_link_train(struct intel_dp *intel_dp, 3472 u32 *DP, 3473 u8 dp_train_pat) 3474 { 3475 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3476 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3477 enum port port = intel_dig_port->base.port; 3478 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 3479 3480 if (dp_train_pat & train_pat_mask) 3481 drm_dbg_kms(&dev_priv->drm, 3482 "Using DP training pattern TPS%d\n", 3483 dp_train_pat & train_pat_mask); 3484 3485 if (HAS_DDI(dev_priv)) { 3486 u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); 3487 3488 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 3489 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 3490 else 3491 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 3492 3493 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 3494 switch (dp_train_pat & train_pat_mask) { 3495 case DP_TRAINING_PATTERN_DISABLE: 3496 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 3497 3498 break; 3499 case DP_TRAINING_PATTERN_1: 3500 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 3501 break; 3502 case DP_TRAINING_PATTERN_2: 3503 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 3504 break; 3505 case DP_TRAINING_PATTERN_3: 3506 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 3507 break; 3508 case DP_TRAINING_PATTERN_4: 3509 temp |= DP_TP_CTL_LINK_TRAIN_PAT4; 3510 break; 3511 } 3512 intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp); 3513 3514 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 3515 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 3516 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3517 3518 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3519 case DP_TRAINING_PATTERN_DISABLE: 3520 *DP |= DP_LINK_TRAIN_OFF_CPT; 3521 break; 3522 case DP_TRAINING_PATTERN_1: 3523 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3524 break; 3525 case DP_TRAINING_PATTERN_2: 3526 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3527 break; 3528 case DP_TRAINING_PATTERN_3: 3529 drm_dbg_kms(&dev_priv->drm, 3530 "TPS3 not supported, using TPS2 instead\n"); 3531 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3532 break; 3533 } 3534 3535 } else { 3536 *DP &= ~DP_LINK_TRAIN_MASK; 3537 3538 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3539 case DP_TRAINING_PATTERN_DISABLE: 3540 *DP |= DP_LINK_TRAIN_OFF; 3541 break; 3542 case DP_TRAINING_PATTERN_1: 3543 *DP |= DP_LINK_TRAIN_PAT_1; 3544 break; 3545 case DP_TRAINING_PATTERN_2: 3546 *DP |= DP_LINK_TRAIN_PAT_2; 3547 break; 3548 case DP_TRAINING_PATTERN_3: 3549 drm_dbg_kms(&dev_priv->drm, 3550 "TPS3 not supported, using TPS2 instead\n"); 3551 *DP |= DP_LINK_TRAIN_PAT_2; 3552 break; 3553 } 3554 } 3555 } 3556 3557 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3558 const struct 
intel_crtc_state *old_crtc_state) 3559 { 3560 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3561 3562 /* enable with pattern 1 (as per spec) */ 3563 3564 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3565 3566 /* 3567 * Magic for VLV/CHV. We _must_ first set up the register 3568 * without actually enabling the port, and then do another 3569 * write to enable the port. Otherwise link training will 3570 * fail when the power sequencer is freshly used for this port. 3571 */ 3572 intel_dp->DP |= DP_PORT_EN; 3573 if (old_crtc_state->has_audio) 3574 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3575 3576 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3577 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3578 } 3579 3580 static void intel_enable_dp(struct intel_encoder *encoder, 3581 const struct intel_crtc_state *pipe_config, 3582 const struct drm_connector_state *conn_state) 3583 { 3584 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3585 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3586 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3587 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3588 enum pipe pipe = crtc->pipe; 3589 intel_wakeref_t wakeref; 3590 3591 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3592 return; 3593 3594 with_pps_lock(intel_dp, wakeref) { 3595 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3596 vlv_init_panel_power_sequencer(encoder, pipe_config); 3597 3598 intel_dp_enable_port(intel_dp, pipe_config); 3599 3600 edp_panel_vdd_on(intel_dp); 3601 edp_panel_on(intel_dp); 3602 edp_panel_vdd_off(intel_dp, true); 3603 } 3604 3605 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3606 unsigned int lane_mask = 0x0; 3607 3608 if (IS_CHERRYVIEW(dev_priv)) 3609 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3610 3611 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3612 lane_mask); 3613 } 3614 3615 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3616 intel_dp_start_link_train(intel_dp); 3617 intel_dp_stop_link_train(intel_dp); 3618 3619 if (pipe_config->has_audio) { 3620 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3621 pipe_name(pipe)); 3622 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3623 } 3624 } 3625 3626 static void g4x_enable_dp(struct intel_encoder *encoder, 3627 const struct intel_crtc_state *pipe_config, 3628 const struct drm_connector_state *conn_state) 3629 { 3630 intel_enable_dp(encoder, pipe_config, conn_state); 3631 intel_edp_backlight_on(pipe_config, conn_state); 3632 } 3633 3634 static void vlv_enable_dp(struct intel_encoder *encoder, 3635 const struct intel_crtc_state *pipe_config, 3636 const struct drm_connector_state *conn_state) 3637 { 3638 intel_edp_backlight_on(pipe_config, conn_state); 3639 } 3640 3641 static void g4x_pre_enable_dp(struct intel_encoder *encoder, 3642 const struct intel_crtc_state *pipe_config, 3643 const struct drm_connector_state *conn_state) 3644 { 3645 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3646 enum port port = encoder->port; 3647 3648 intel_dp_prepare(encoder, pipe_config); 3649 3650 /* Only ilk+ has port A */ 3651 if (port == PORT_A) 3652 ilk_edp_pll_on(intel_dp, pipe_config); 3653 } 3654 3655 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3656 { 3657 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3658 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 3659 enum pipe pipe = 
intel_dp->pps_pipe; 3660 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3661 3662 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3663 3664 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 3665 return; 3666 3667 edp_panel_vdd_off_sync(intel_dp); 3668 3669 /* 3670 * VLV seems to get confused when multiple power sequencers 3671 * have the same port selected (even if only one has power/vdd 3672 * enabled). The failure manifests as vlv_wait_port_ready() failing. 3673 * CHV on the other hand doesn't seem to mind having the same port 3674 * selected in multiple power sequencers, but let's clear the 3675 * port select always when logically disconnecting a power sequencer 3676 * from a port. 3677 */ 3678 drm_dbg_kms(&dev_priv->drm, 3679 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", 3680 pipe_name(pipe), intel_dig_port->base.base.base.id, 3681 intel_dig_port->base.base.name); 3682 intel_de_write(dev_priv, pp_on_reg, 0); 3683 intel_de_posting_read(dev_priv, pp_on_reg); 3684 3685 intel_dp->pps_pipe = INVALID_PIPE; 3686 } 3687 3688 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 3689 enum pipe pipe) 3690 { 3691 struct intel_encoder *encoder; 3692 3693 lockdep_assert_held(&dev_priv->pps_mutex); 3694 3695 for_each_intel_dp(&dev_priv->drm, encoder) { 3696 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3697 3698 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe, 3699 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", 3700 pipe_name(pipe), encoder->base.base.id, 3701 encoder->base.name); 3702 3703 if (intel_dp->pps_pipe != pipe) 3704 continue; 3705 3706 drm_dbg_kms(&dev_priv->drm, 3707 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", 3708 pipe_name(pipe), encoder->base.base.id, 3709 encoder->base.name); 3710 3711 /* make sure vdd is off before we steal it */ 3712 vlv_detach_power_sequencer(intel_dp); 3713 } 3714 } 3715 3716 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 3717 const struct intel_crtc_state *crtc_state) 3718 { 3719 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3720 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3721 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3722 3723 lockdep_assert_held(&dev_priv->pps_mutex); 3724 3725 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3726 3727 if (intel_dp->pps_pipe != INVALID_PIPE && 3728 intel_dp->pps_pipe != crtc->pipe) { 3729 /* 3730 * If another power sequencer was being used on this 3731 * port previously make sure to turn off vdd there while 3732 * we still have control of it. 3733 */ 3734 vlv_detach_power_sequencer(intel_dp); 3735 } 3736 3737 /* 3738 * We may be stealing the power 3739 * sequencer from another port.
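 * vlv_steal_power_sequencer() detaches it from whichever encoder
 * currently has this pipe selected, making sure that encoder's VDD is
 * off first.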
3740 */ 3741 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3742 3743 intel_dp->active_pipe = crtc->pipe; 3744 3745 if (!intel_dp_is_edp(intel_dp)) 3746 return; 3747 3748 /* now it's all ours */ 3749 intel_dp->pps_pipe = crtc->pipe; 3750 3751 drm_dbg_kms(&dev_priv->drm, 3752 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3753 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3754 encoder->base.name); 3755 3756 /* init power sequencer on this pipe and port */ 3757 intel_dp_init_panel_power_sequencer(intel_dp); 3758 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3759 } 3760 3761 static void vlv_pre_enable_dp(struct intel_encoder *encoder, 3762 const struct intel_crtc_state *pipe_config, 3763 const struct drm_connector_state *conn_state) 3764 { 3765 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3766 3767 intel_enable_dp(encoder, pipe_config, conn_state); 3768 } 3769 3770 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, 3771 const struct intel_crtc_state *pipe_config, 3772 const struct drm_connector_state *conn_state) 3773 { 3774 intel_dp_prepare(encoder, pipe_config); 3775 3776 vlv_phy_pre_pll_enable(encoder, pipe_config); 3777 } 3778 3779 static void chv_pre_enable_dp(struct intel_encoder *encoder, 3780 const struct intel_crtc_state *pipe_config, 3781 const struct drm_connector_state *conn_state) 3782 { 3783 chv_phy_pre_encoder_enable(encoder, pipe_config); 3784 3785 intel_enable_dp(encoder, pipe_config, conn_state); 3786 3787 /* Second common lane will stay alive on its own now */ 3788 chv_phy_release_cl2_override(encoder); 3789 } 3790 3791 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, 3792 const struct intel_crtc_state *pipe_config, 3793 const struct drm_connector_state *conn_state) 3794 { 3795 intel_dp_prepare(encoder, pipe_config); 3796 3797 chv_phy_pre_pll_enable(encoder, pipe_config); 3798 } 3799 3800 static void chv_dp_post_pll_disable(struct intel_encoder *encoder, 3801 const struct intel_crtc_state *old_crtc_state, 3802 const struct drm_connector_state *old_conn_state) 3803 { 3804 chv_phy_post_pll_disable(encoder, old_crtc_state); 3805 } 3806 3807 /* 3808 * Fetch AUX CH registers 0x202 - 0x207 which contain 3809 * link status information 3810 */ 3811 bool 3812 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3813 { 3814 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3815 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 3816 } 3817 3818 /* These are source-specific values. 
*/ 3819 u8 3820 intel_dp_voltage_max(struct intel_dp *intel_dp) 3821 { 3822 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3823 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3824 enum port port = encoder->port; 3825 3826 if (HAS_DDI(dev_priv)) 3827 return intel_ddi_dp_voltage_max(encoder); 3828 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3829 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3830 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3831 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3832 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3833 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3834 else 3835 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3836 } 3837 3838 u8 3839 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) 3840 { 3841 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3842 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3843 enum port port = encoder->port; 3844 3845 if (HAS_DDI(dev_priv)) { 3846 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); 3847 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3848 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3849 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3850 return DP_TRAIN_PRE_EMPH_LEVEL_3; 3851 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3852 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3853 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3854 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3855 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3856 default: 3857 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3858 } 3859 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 3860 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3862 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3864 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3865 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3866 default: 3867 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3868 } 3869 } else { 3870 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3872 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3873 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3874 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3875 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3876 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3878 default: 3879 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3880 } 3881 } 3882 } 3883 3884 static u32 vlv_signal_levels(struct intel_dp *intel_dp) 3885 { 3886 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3887 unsigned long demph_reg_value, preemph_reg_value, 3888 uniqtranscale_reg_value; 3889 u8 train_set = intel_dp->train_set[0]; 3890 3891 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3892 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3893 preemph_reg_value = 0x0004000; 3894 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3895 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3896 demph_reg_value = 0x2B405555; 3897 uniqtranscale_reg_value = 0x552AB83A; 3898 break; 3899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3900 demph_reg_value = 0x2B404040; 3901 uniqtranscale_reg_value = 0x5548B83A; 3902 break; 3903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3904 demph_reg_value = 0x2B245555; 3905 uniqtranscale_reg_value = 0x5560B83A; 3906 break; 3907 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3908 demph_reg_value = 0x2B405555; 3909 uniqtranscale_reg_value = 0x5598DA3A; 3910 break; 3911 default: 3912 return 0; 3913 } 3914 break; 3915 case DP_TRAIN_PRE_EMPH_LEVEL_1: 3916 preemph_reg_value = 0x0002000; 3917 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3918 case 
DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3919 demph_reg_value = 0x2B404040; 3920 uniqtranscale_reg_value = 0x5552B83A; 3921 break; 3922 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3923 demph_reg_value = 0x2B404848; 3924 uniqtranscale_reg_value = 0x5580B83A; 3925 break; 3926 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3927 demph_reg_value = 0x2B404040; 3928 uniqtranscale_reg_value = 0x55ADDA3A; 3929 break; 3930 default: 3931 return 0; 3932 } 3933 break; 3934 case DP_TRAIN_PRE_EMPH_LEVEL_2: 3935 preemph_reg_value = 0x0000000; 3936 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3938 demph_reg_value = 0x2B305555; 3939 uniqtranscale_reg_value = 0x5570B83A; 3940 break; 3941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3942 demph_reg_value = 0x2B2B4040; 3943 uniqtranscale_reg_value = 0x55ADDA3A; 3944 break; 3945 default: 3946 return 0; 3947 } 3948 break; 3949 case DP_TRAIN_PRE_EMPH_LEVEL_3: 3950 preemph_reg_value = 0x0006000; 3951 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3953 demph_reg_value = 0x1B405555; 3954 uniqtranscale_reg_value = 0x55ADDA3A; 3955 break; 3956 default: 3957 return 0; 3958 } 3959 break; 3960 default: 3961 return 0; 3962 } 3963 3964 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 3965 uniqtranscale_reg_value, 0); 3966 3967 return 0; 3968 } 3969 3970 static u32 chv_signal_levels(struct intel_dp *intel_dp) 3971 { 3972 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3973 u32 deemph_reg_value, margin_reg_value; 3974 bool uniq_trans_scale = false; 3975 u8 train_set = intel_dp->train_set[0]; 3976 3977 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3978 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3979 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3981 deemph_reg_value = 128; 3982 margin_reg_value = 52; 3983 break; 3984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3985 deemph_reg_value = 128; 3986 margin_reg_value = 77; 3987 break; 3988 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3989 deemph_reg_value = 128; 3990 margin_reg_value = 102; 3991 break; 3992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3993 deemph_reg_value = 128; 3994 margin_reg_value = 154; 3995 uniq_trans_scale = true; 3996 break; 3997 default: 3998 return 0; 3999 } 4000 break; 4001 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4002 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4003 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4004 deemph_reg_value = 85; 4005 margin_reg_value = 78; 4006 break; 4007 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4008 deemph_reg_value = 85; 4009 margin_reg_value = 116; 4010 break; 4011 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4012 deemph_reg_value = 85; 4013 margin_reg_value = 154; 4014 break; 4015 default: 4016 return 0; 4017 } 4018 break; 4019 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4020 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4022 deemph_reg_value = 64; 4023 margin_reg_value = 104; 4024 break; 4025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4026 deemph_reg_value = 64; 4027 margin_reg_value = 154; 4028 break; 4029 default: 4030 return 0; 4031 } 4032 break; 4033 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4034 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4036 deemph_reg_value = 43; 4037 margin_reg_value = 154; 4038 break; 4039 default: 4040 return 0; 4041 } 4042 break; 4043 default: 4044 return 0; 4045 } 4046 4047 chv_set_phy_signal_level(encoder, deemph_reg_value, 4048 margin_reg_value, uniq_trans_scale); 4049 4050 return 0; 
4051 } 4052 4053 static u32 4054 g4x_signal_levels(u8 train_set) 4055 { 4056 u32 signal_levels = 0; 4057 4058 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4060 default: 4061 signal_levels |= DP_VOLTAGE_0_4; 4062 break; 4063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4064 signal_levels |= DP_VOLTAGE_0_6; 4065 break; 4066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4067 signal_levels |= DP_VOLTAGE_0_8; 4068 break; 4069 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4070 signal_levels |= DP_VOLTAGE_1_2; 4071 break; 4072 } 4073 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4074 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4075 default: 4076 signal_levels |= DP_PRE_EMPHASIS_0; 4077 break; 4078 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4079 signal_levels |= DP_PRE_EMPHASIS_3_5; 4080 break; 4081 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4082 signal_levels |= DP_PRE_EMPHASIS_6; 4083 break; 4084 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4085 signal_levels |= DP_PRE_EMPHASIS_9_5; 4086 break; 4087 } 4088 return signal_levels; 4089 } 4090 4091 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4092 static u32 4093 snb_cpu_edp_signal_levels(u8 train_set) 4094 { 4095 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4096 DP_TRAIN_PRE_EMPHASIS_MASK); 4097 switch (signal_levels) { 4098 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4099 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4100 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4102 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4103 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4105 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4108 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4109 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4111 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4112 default: 4113 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4114 "0x%x\n", signal_levels); 4115 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4116 } 4117 } 4118 4119 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4120 static u32 4121 ivb_cpu_edp_signal_levels(u8 train_set) 4122 { 4123 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4124 DP_TRAIN_PRE_EMPHASIS_MASK); 4125 switch (signal_levels) { 4126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4127 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4129 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4131 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4132 4133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4134 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4135 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4136 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4137 4138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4139 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4141 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4142 4143 default: 4144 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4145 "0x%x\n", signal_levels); 4146 return 
EDP_LINK_TRAIN_500MV_0DB_IVB; 4147 } 4148 } 4149 4150 void 4151 intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4152 { 4153 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4155 enum port port = intel_dig_port->base.port; 4156 u32 signal_levels, mask = 0; 4157 u8 train_set = intel_dp->train_set[0]; 4158 4159 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) { 4160 signal_levels = bxt_signal_levels(intel_dp); 4161 } else if (HAS_DDI(dev_priv)) { 4162 signal_levels = ddi_signal_levels(intel_dp); 4163 mask = DDI_BUF_EMP_MASK; 4164 } else if (IS_CHERRYVIEW(dev_priv)) { 4165 signal_levels = chv_signal_levels(intel_dp); 4166 } else if (IS_VALLEYVIEW(dev_priv)) { 4167 signal_levels = vlv_signal_levels(intel_dp); 4168 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 4169 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4170 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4171 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) { 4172 signal_levels = snb_cpu_edp_signal_levels(train_set); 4173 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4174 } else { 4175 signal_levels = g4x_signal_levels(train_set); 4176 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; 4177 } 4178 4179 if (mask) 4180 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4181 signal_levels); 4182 4183 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4184 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4185 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4186 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4187 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4188 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4189 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 4190 " (max)" : ""); 4191 4192 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; 4193 4194 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4195 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4196 } 4197 4198 void 4199 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4200 u8 dp_train_pat) 4201 { 4202 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4203 struct drm_i915_private *dev_priv = 4204 to_i915(intel_dig_port->base.base.dev); 4205 4206 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); 4207 4208 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4209 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4210 } 4211 4212 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4213 { 4214 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4215 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4216 enum port port = intel_dig_port->base.port; 4217 u32 val; 4218 4219 if (!HAS_DDI(dev_priv)) 4220 return; 4221 4222 val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); 4223 val &= ~DP_TP_CTL_LINK_TRAIN_MASK; 4224 val |= DP_TP_CTL_LINK_TRAIN_IDLE; 4225 intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); 4226 4227 /* 4228 * Until TGL on PORT_A we can have only eDP in SST mode. There the only 4229 * reason we need to set idle transmission mode is to work around a HW 4230 * issue where we enable the pipe while not in idle link-training mode. 4231 * In this case there is requirement to wait for a minimum number of 4232 * idle patterns to be sent. 
4233 */ 4234 if (port == PORT_A && INTEL_GEN(dev_priv) < 12) 4235 return; 4236 4237 if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, 4238 DP_TP_STATUS_IDLE_DONE, 1)) 4239 drm_err(&dev_priv->drm, 4240 "Timed out waiting for DP idle patterns\n"); 4241 } 4242 4243 static void 4244 intel_dp_link_down(struct intel_encoder *encoder, 4245 const struct intel_crtc_state *old_crtc_state) 4246 { 4247 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4248 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4249 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4250 enum port port = encoder->port; 4251 u32 DP = intel_dp->DP; 4252 4253 if (drm_WARN_ON(&dev_priv->drm, 4254 (intel_de_read(dev_priv, intel_dp->output_reg) & 4255 DP_PORT_EN) == 0)) 4256 return; 4257 4258 drm_dbg_kms(&dev_priv->drm, "\n"); 4259 4260 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4261 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4262 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4263 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4264 } else { 4265 DP &= ~DP_LINK_TRAIN_MASK; 4266 DP |= DP_LINK_TRAIN_PAT_IDLE; 4267 } 4268 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4269 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4270 4271 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4272 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4273 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4274 4275 /* 4276 * HW workaround for IBX, we need to move the port 4277 * to transcoder A after disabling it to allow the 4278 * matching HDMI port to be enabled on transcoder A. 4279 */ 4280 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4281 /* 4282 * We get CPU/PCH FIFO underruns on the other pipe when 4283 * doing the workaround. Sweep them under the rug. 4284 */ 4285 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4286 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4287 4288 /* always enable with pattern 1 (as per spec) */ 4289 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 4290 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 4291 DP_LINK_TRAIN_PAT_1; 4292 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4293 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4294 4295 DP &= ~DP_PORT_EN; 4296 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4297 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4298 4299 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 4300 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4301 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4302 } 4303 4304 msleep(intel_dp->panel_power_down_delay); 4305 4306 intel_dp->DP = DP; 4307 4308 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4309 intel_wakeref_t wakeref; 4310 4311 with_pps_lock(intel_dp, wakeref) 4312 intel_dp->active_pipe = INVALID_PIPE; 4313 } 4314 } 4315 4316 static void 4317 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) 4318 { 4319 u8 dpcd_ext[6]; 4320 4321 /* 4322 * Prior to DP1.3 the bit represented by 4323 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. 4324 * if it is set DP_DPCD_REV at 0000h could be at a value less than 4325 * the true capability of the panel. The only way to check is to 4326 * then compare 0000h and 2200h. 
4327 */ 4328 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & 4329 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) 4330 return; 4331 4332 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, 4333 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { 4334 DRM_ERROR("DPCD failed read at extended capabilities\n"); 4335 return; 4336 } 4337 4338 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { 4339 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n"); 4340 return; 4341 } 4342 4343 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) 4344 return; 4345 4346 DRM_DEBUG_KMS("Base DPCD: %*ph\n", 4347 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); 4348 4349 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); 4350 } 4351 4352 bool 4353 intel_dp_read_dpcd(struct intel_dp *intel_dp) 4354 { 4355 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 4356 sizeof(intel_dp->dpcd)) < 0) 4357 return false; /* aux transfer failed */ 4358 4359 intel_dp_extended_receiver_capabilities(intel_dp); 4360 4361 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); 4362 4363 return intel_dp->dpcd[DP_DPCD_REV] != 0; 4364 } 4365 4366 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4367 { 4368 u8 dprx = 0; 4369 4370 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4371 &dprx) != 1) 4372 return false; 4373 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4374 } 4375 4376 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 4377 { 4378 /* 4379 * Clear the cached register set to avoid using stale values 4380 * for the sinks that do not support DSC. 4381 */ 4382 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4383 4384 /* Clear fec_capable to avoid using stale values */ 4385 intel_dp->fec_capable = 0; 4386 4387 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4388 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4389 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4390 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4391 intel_dp->dsc_dpcd, 4392 sizeof(intel_dp->dsc_dpcd)) < 0) 4393 DRM_ERROR("Failed to read DPCD register 0x%x\n", 4394 DP_DSC_SUPPORT); 4395 4396 DRM_DEBUG_KMS("DSC DPCD: %*ph\n", 4397 (int)sizeof(intel_dp->dsc_dpcd), 4398 intel_dp->dsc_dpcd); 4399 4400 /* FEC is supported only on DP 1.4 */ 4401 if (!intel_dp_is_edp(intel_dp) && 4402 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4403 &intel_dp->fec_capable) < 0) 4404 DRM_ERROR("Failed to read FEC DPCD register\n"); 4405 4406 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable); 4407 } 4408 } 4409 4410 static bool 4411 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4412 { 4413 struct drm_i915_private *dev_priv = 4414 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4415 4416 /* this function is meant to be called only once */ 4417 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4418 4419 if (!intel_dp_read_dpcd(intel_dp)) 4420 return false; 4421 4422 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4423 drm_dp_is_branch(intel_dp->dpcd)); 4424 4425 /* 4426 * Read the eDP display control registers. 4427 * 4428 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4429 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4430 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4431 * method). The display control registers should read zero if they're 4432 * not supported anyway. 
4433 */ 4434 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4435 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4436 sizeof(intel_dp->edp_dpcd)) 4437 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 4438 (int)sizeof(intel_dp->edp_dpcd), 4439 intel_dp->edp_dpcd); 4440 4441 /* 4442 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4443 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4444 */ 4445 intel_psr_init_dpcd(intel_dp); 4446 4447 /* Read the eDP 1.4+ supported link rates. */ 4448 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4449 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4450 int i; 4451 4452 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4453 sink_rates, sizeof(sink_rates)); 4454 4455 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4456 int val = le16_to_cpu(sink_rates[i]); 4457 4458 if (val == 0) 4459 break; 4460 4461 /* Value read multiplied by 200kHz gives the per-lane 4462 * link rate in kHz. The source rates are, however, 4463 * stored in terms of LS_Clk kHz. The full conversion 4464 * back to symbols is 4465 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4466 */ 4467 intel_dp->sink_rates[i] = (val * 200) / 10; 4468 } 4469 intel_dp->num_sink_rates = i; 4470 } 4471 4472 /* 4473 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4474 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 4475 */ 4476 if (intel_dp->num_sink_rates) 4477 intel_dp->use_rate_select = true; 4478 else 4479 intel_dp_set_sink_rates(intel_dp); 4480 4481 intel_dp_set_common_rates(intel_dp); 4482 4483 /* Read the eDP DSC DPCD registers */ 4484 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4485 intel_dp_get_dsc_sink_cap(intel_dp); 4486 4487 return true; 4488 } 4489 4490 4491 static bool 4492 intel_dp_get_dpcd(struct intel_dp *intel_dp) 4493 { 4494 if (!intel_dp_read_dpcd(intel_dp)) 4495 return false; 4496 4497 /* 4498 * Don't clobber cached eDP rates. Also skip re-reading 4499 * the OUI/ID since we know it won't change. 4500 */ 4501 if (!intel_dp_is_edp(intel_dp)) { 4502 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4503 drm_dp_is_branch(intel_dp->dpcd)); 4504 4505 intel_dp_set_sink_rates(intel_dp); 4506 intel_dp_set_common_rates(intel_dp); 4507 } 4508 4509 /* 4510 * Some eDP panels do not set a valid value for sink count, that is why 4511 * it don't care about read it here and in intel_edp_init_dpcd(). 4512 */ 4513 if (!intel_dp_is_edp(intel_dp) && 4514 !drm_dp_has_quirk(&intel_dp->desc, 0, 4515 DP_DPCD_QUIRK_NO_SINK_COUNT)) { 4516 u8 count; 4517 ssize_t r; 4518 4519 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); 4520 if (r < 1) 4521 return false; 4522 4523 /* 4524 * Sink count can change between short pulse hpd hence 4525 * a member variable in intel_dp will track any changes 4526 * between short pulse interrupts. 4527 */ 4528 intel_dp->sink_count = DP_GET_SINK_COUNT(count); 4529 4530 /* 4531 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4532 * a dongle is present but no display. Unless we require to know 4533 * if a dongle is present or not, we don't need to update 4534 * downstream port information. So, an early return here saves 4535 * time from performing other operations which are not required. 
4536 */ 4537 if (!intel_dp->sink_count) 4538 return false; 4539 } 4540 4541 if (!drm_dp_is_branch(intel_dp->dpcd)) 4542 return true; /* native DP sink */ 4543 4544 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 4545 return true; /* no per-port downstream info */ 4546 4547 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, 4548 intel_dp->downstream_ports, 4549 DP_MAX_DOWNSTREAM_PORTS) < 0) 4550 return false; /* downstream port status fetch failed */ 4551 4552 return true; 4553 } 4554 4555 static bool 4556 intel_dp_sink_can_mst(struct intel_dp *intel_dp) 4557 { 4558 u8 mstm_cap; 4559 4560 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 4561 return false; 4562 4563 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) 4564 return false; 4565 4566 return mstm_cap & DP_MST_CAP; 4567 } 4568 4569 static bool 4570 intel_dp_can_mst(struct intel_dp *intel_dp) 4571 { 4572 return i915_modparams.enable_dp_mst && 4573 intel_dp->can_mst && 4574 intel_dp_sink_can_mst(intel_dp); 4575 } 4576 4577 static void 4578 intel_dp_configure_mst(struct intel_dp *intel_dp) 4579 { 4580 struct intel_encoder *encoder = 4581 &dp_to_dig_port(intel_dp)->base; 4582 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); 4583 4584 DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 4585 encoder->base.base.id, encoder->base.name, 4586 yesno(intel_dp->can_mst), yesno(sink_can_mst), 4587 yesno(i915_modparams.enable_dp_mst)); 4588 4589 if (!intel_dp->can_mst) 4590 return; 4591 4592 intel_dp->is_mst = sink_can_mst && 4593 i915_modparams.enable_dp_mst; 4594 4595 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4596 intel_dp->is_mst); 4597 } 4598 4599 static bool 4600 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4601 { 4602 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4603 sink_irq_vector, DP_DPRX_ESI_LEN) == 4604 DP_DPRX_ESI_LEN; 4605 } 4606 4607 bool 4608 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 4609 const struct drm_connector_state *conn_state) 4610 { 4611 /* 4612 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 4613 * of Color Encoding Format and Content Color Gamut], in order to 4614 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 4615 */ 4616 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4617 return true; 4618 4619 switch (conn_state->colorspace) { 4620 case DRM_MODE_COLORIMETRY_SYCC_601: 4621 case DRM_MODE_COLORIMETRY_OPYCC_601: 4622 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4623 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4624 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4625 return true; 4626 default: 4627 break; 4628 } 4629 4630 return false; 4631 } 4632 4633 static void 4634 intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp, 4635 const struct intel_crtc_state *crtc_state, 4636 const struct drm_connector_state *conn_state) 4637 { 4638 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4639 struct dp_sdp vsc_sdp = {}; 4640 4641 /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */ 4642 vsc_sdp.sdp_header.HB0 = 0; 4643 vsc_sdp.sdp_header.HB1 = 0x7; 4644 4645 /* 4646 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 4647 * Colorimetry Format indication. 4648 */ 4649 vsc_sdp.sdp_header.HB2 = 0x5; 4650 4651 /* 4652 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/ 4653 * Colorimetry Format indication (HB2 = 05h). 
4654 */ 4655 vsc_sdp.sdp_header.HB3 = 0x13; 4656 4657 /* DP 1.4a spec, Table 2-120 */ 4658 switch (crtc_state->output_format) { 4659 case INTEL_OUTPUT_FORMAT_YCBCR444: 4660 vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */ 4661 break; 4662 case INTEL_OUTPUT_FORMAT_YCBCR420: 4663 vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */ 4664 break; 4665 case INTEL_OUTPUT_FORMAT_RGB: 4666 default: 4667 /* RGB: DB16[7:4] = 0h */ 4668 break; 4669 } 4670 4671 switch (conn_state->colorspace) { 4672 case DRM_MODE_COLORIMETRY_BT709_YCC: 4673 vsc_sdp.db[16] |= 0x1; 4674 break; 4675 case DRM_MODE_COLORIMETRY_XVYCC_601: 4676 vsc_sdp.db[16] |= 0x2; 4677 break; 4678 case DRM_MODE_COLORIMETRY_XVYCC_709: 4679 vsc_sdp.db[16] |= 0x3; 4680 break; 4681 case DRM_MODE_COLORIMETRY_SYCC_601: 4682 vsc_sdp.db[16] |= 0x4; 4683 break; 4684 case DRM_MODE_COLORIMETRY_OPYCC_601: 4685 vsc_sdp.db[16] |= 0x5; 4686 break; 4687 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4688 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4689 vsc_sdp.db[16] |= 0x6; 4690 break; 4691 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4692 vsc_sdp.db[16] |= 0x7; 4693 break; 4694 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 4695 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 4696 vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */ 4697 break; 4698 default: 4699 /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */ 4700 4701 /* RGB->YCBCR color conversion uses the BT.709 color space. */ 4702 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4703 vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */ 4704 break; 4705 } 4706 4707 /* 4708 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only, 4709 * the following Component Bit Depth values are defined: 4710 * 001b = 8bpc. 4711 * 010b = 10bpc. 4712 * 011b = 12bpc. 4713 * 100b = 16bpc. 4714 */ 4715 switch (crtc_state->pipe_bpp) { 4716 case 24: /* 8bpc */ 4717 vsc_sdp.db[17] = 0x1; 4718 break; 4719 case 30: /* 10bpc */ 4720 vsc_sdp.db[17] = 0x2; 4721 break; 4722 case 36: /* 12bpc */ 4723 vsc_sdp.db[17] = 0x3; 4724 break; 4725 case 48: /* 16bpc */ 4726 vsc_sdp.db[17] = 0x4; 4727 break; 4728 default: 4729 MISSING_CASE(crtc_state->pipe_bpp); 4730 break; 4731 } 4732 4733 /* 4734 * Dynamic Range (Bit 7) 4735 * 0 = VESA range, 1 = CTA range. 4736 * all YCbCr are always limited range 4737 */ 4738 vsc_sdp.db[17] |= 0x80; 4739 4740 /* 4741 * Content Type (Bits 2:0) 4742 * 000b = Not defined. 4743 * 001b = Graphics. 4744 * 010b = Photo. 4745 * 011b = Video. 4746 * 100b = Game 4747 * All other values are RESERVED. 4748 * Note: See CTA-861-G for the definition and expected 4749 * processing by a stream sink for the above contect types. 
4750 */ 4751 vsc_sdp.db[18] = 0; 4752 4753 intel_dig_port->write_infoframe(&intel_dig_port->base, 4754 crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp)); 4755 } 4756 4757 static void 4758 intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 4759 const struct intel_crtc_state *crtc_state, 4760 const struct drm_connector_state *conn_state) 4761 { 4762 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4763 struct dp_sdp infoframe_sdp = {}; 4764 struct hdmi_drm_infoframe drm_infoframe = {}; 4765 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4766 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4767 ssize_t len; 4768 int ret; 4769 4770 ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state); 4771 if (ret) { 4772 DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n"); 4773 return; 4774 } 4775 4776 len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf)); 4777 if (len < 0) { 4778 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 4779 return; 4780 } 4781 4782 if (len != infoframe_size) { 4783 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 4784 return; 4785 } 4786 4787 /* 4788 * Set up the infoframe sdp packet for HDR static metadata. 4789 * Prepare VSC Header for SU as per DP 1.4a spec, 4790 * Table 2-100 and Table 2-101 4791 */ 4792 4793 /* Packet ID, 00h for non-Audio INFOFRAME */ 4794 infoframe_sdp.sdp_header.HB0 = 0; 4795 /* 4796 * Packet Type 80h + Non-audio INFOFRAME Type value 4797 * HDMI_INFOFRAME_TYPE_DRM: 0x87, 4798 */ 4799 infoframe_sdp.sdp_header.HB1 = drm_infoframe.type; 4800 /* 4801 * Least Significant Eight Bits of (Data Byte Count – 1) 4802 * infoframe_size - 1, 4803 */ 4804 infoframe_sdp.sdp_header.HB2 = 0x1D; 4805 /* INFOFRAME SDP Version Number */ 4806 infoframe_sdp.sdp_header.HB3 = (0x13 << 2); 4807 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4808 infoframe_sdp.db[0] = drm_infoframe.version; 4809 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4810 infoframe_sdp.db[1] = drm_infoframe.length; 4811 /* 4812 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4813 * HDMI_INFOFRAME_HEADER_SIZE 4814 */ 4815 BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4816 memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4817 HDMI_DRM_INFOFRAME_SIZE); 4818 4819 /* 4820 * Size of DP infoframe sdp packet for HDR static metadata is consist of 4821 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4822 * - Two Data Blocks: 2 bytes 4823 * CTA Header Byte2 (INFOFRAME Version Number) 4824 * CTA Header Byte3 (Length of INFOFRAME) 4825 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4826 * 4827 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 4828 * infoframe size. But GEN11+ has larger than that size, write_infoframe 4829 * will pad rest of the size. 
4830 */ 4831 intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state, 4832 HDMI_PACKET_TYPE_GAMUT_METADATA, 4833 &infoframe_sdp, 4834 sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE); 4835 } 4836 4837 void intel_dp_vsc_enable(struct intel_dp *intel_dp, 4838 const struct intel_crtc_state *crtc_state, 4839 const struct drm_connector_state *conn_state) 4840 { 4841 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 4842 return; 4843 4844 intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state); 4845 } 4846 4847 void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp, 4848 const struct intel_crtc_state *crtc_state, 4849 const struct drm_connector_state *conn_state) 4850 { 4851 if (!conn_state->hdr_output_metadata) 4852 return; 4853 4854 intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp, 4855 crtc_state, 4856 conn_state); 4857 } 4858 4859 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 4860 { 4861 int status = 0; 4862 int test_link_rate; 4863 u8 test_lane_count, test_link_bw; 4864 /* (DP CTS 1.2) 4865 * 4.3.1.11 4866 */ 4867 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 4868 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 4869 &test_lane_count); 4870 4871 if (status <= 0) { 4872 DRM_DEBUG_KMS("Lane count read failed\n"); 4873 return DP_TEST_NAK; 4874 } 4875 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 4876 4877 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 4878 &test_link_bw); 4879 if (status <= 0) { 4880 DRM_DEBUG_KMS("Link Rate read failed\n"); 4881 return DP_TEST_NAK; 4882 } 4883 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 4884 4885 /* Validate the requested link rate and lane count */ 4886 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 4887 test_lane_count)) 4888 return DP_TEST_NAK; 4889 4890 intel_dp->compliance.test_lane_count = test_lane_count; 4891 intel_dp->compliance.test_link_rate = test_link_rate; 4892 4893 return DP_TEST_ACK; 4894 } 4895 4896 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 4897 { 4898 u8 test_pattern; 4899 u8 test_misc; 4900 __be16 h_width, v_height; 4901 int status = 0; 4902 4903 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 4904 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 4905 &test_pattern); 4906 if (status <= 0) { 4907 DRM_DEBUG_KMS("Test pattern read failed\n"); 4908 return DP_TEST_NAK; 4909 } 4910 if (test_pattern != DP_COLOR_RAMP) 4911 return DP_TEST_NAK; 4912 4913 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 4914 &h_width, 2); 4915 if (status <= 0) { 4916 DRM_DEBUG_KMS("H Width read failed\n"); 4917 return DP_TEST_NAK; 4918 } 4919 4920 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 4921 &v_height, 2); 4922 if (status <= 0) { 4923 DRM_DEBUG_KMS("V Height read failed\n"); 4924 return DP_TEST_NAK; 4925 } 4926 4927 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 4928 &test_misc); 4929 if (status <= 0) { 4930 DRM_DEBUG_KMS("TEST MISC read failed\n"); 4931 return DP_TEST_NAK; 4932 } 4933 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 4934 return DP_TEST_NAK; 4935 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 4936 return DP_TEST_NAK; 4937 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 4938 case DP_TEST_BIT_DEPTH_6: 4939 intel_dp->compliance.test_data.bpc = 6; 4940 break; 4941 case DP_TEST_BIT_DEPTH_8: 4942 intel_dp->compliance.test_data.bpc = 8; 4943 break; 4944 default: 4945 return DP_TEST_NAK; 4946 } 4947 4948 
intel_dp->compliance.test_data.video_pattern = test_pattern; 4949 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4950 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4951 /* Set test active flag here so userspace doesn't interrupt things */ 4952 intel_dp->compliance.test_active = true; 4953 4954 return DP_TEST_ACK; 4955 } 4956 4957 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 4958 { 4959 u8 test_result = DP_TEST_ACK; 4960 struct intel_connector *intel_connector = intel_dp->attached_connector; 4961 struct drm_connector *connector = &intel_connector->base; 4962 4963 if (intel_connector->detect_edid == NULL || 4964 connector->edid_corrupt || 4965 intel_dp->aux.i2c_defer_count > 6) { 4966 /* Check EDID read for NACKs, DEFERs and corruption 4967 * (DP CTS 1.2 Core r1.1) 4968 * 4.2.2.4 : Failed EDID read, I2C_NAK 4969 * 4.2.2.5 : Failed EDID read, I2C_DEFER 4970 * 4.2.2.6 : EDID corruption detected 4971 * Use failsafe mode for all cases 4972 */ 4973 if (intel_dp->aux.i2c_nack_count > 0 || 4974 intel_dp->aux.i2c_defer_count > 0) 4975 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", 4976 intel_dp->aux.i2c_nack_count, 4977 intel_dp->aux.i2c_defer_count); 4978 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 4979 } else { 4980 struct edid *block = intel_connector->detect_edid; 4981 4982 /* We have to write the checksum 4983 * of the last block read 4984 */ 4985 block += intel_connector->detect_edid->extensions; 4986 4987 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 4988 block->checksum) <= 0) 4989 DRM_DEBUG_KMS("Failed to write EDID checksum\n"); 4990 4991 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 4992 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 4993 } 4994 4995 /* Set test active flag here so userspace doesn't interrupt things */ 4996 intel_dp->compliance.test_active = true; 4997 4998 return test_result; 4999 } 5000 5001 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5002 { 5003 u8 test_result = DP_TEST_NAK; 5004 return test_result; 5005 } 5006 5007 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5008 { 5009 u8 response = DP_TEST_NAK; 5010 u8 request = 0; 5011 int status; 5012 5013 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5014 if (status <= 0) { 5015 DRM_DEBUG_KMS("Could not read test request from sink\n"); 5016 goto update_status; 5017 } 5018 5019 switch (request) { 5020 case DP_TEST_LINK_TRAINING: 5021 DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); 5022 response = intel_dp_autotest_link_training(intel_dp); 5023 break; 5024 case DP_TEST_LINK_VIDEO_PATTERN: 5025 DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); 5026 response = intel_dp_autotest_video_pattern(intel_dp); 5027 break; 5028 case DP_TEST_LINK_EDID_READ: 5029 DRM_DEBUG_KMS("EDID test requested\n"); 5030 response = intel_dp_autotest_edid(intel_dp); 5031 break; 5032 case DP_TEST_LINK_PHY_TEST_PATTERN: 5033 DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); 5034 response = intel_dp_autotest_phy_pattern(intel_dp); 5035 break; 5036 default: 5037 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); 5038 break; 5039 } 5040 5041 if (response & DP_TEST_ACK) 5042 intel_dp->compliance.test_type = request; 5043 5044 update_status: 5045 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5046 if (status <= 0) 5047 DRM_DEBUG_KMS("Could not write test response to sink\n"); 5048 } 5049 5050 static int 5051 intel_dp_check_mst_status(struct 
intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}

static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While the PSR source HW is enabled it controls the main-link,
	 * sending frames and enabling/disabling the link on its own, so
	 * attempting a retrain here will fail: the link may not be on, or
	 * training patterns could get mixed with frame data, making the
	 * retrain fail. Also, when exiting PSR the HW retrains the link
	 * anyway, fixing any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
5132 */ 5133 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5134 intel_dp->lane_count)) 5135 return false; 5136 5137 /* Retrain if Channel EQ or CR not ok */ 5138 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5139 } 5140 5141 int intel_dp_retrain_link(struct intel_encoder *encoder, 5142 struct drm_modeset_acquire_ctx *ctx) 5143 { 5144 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5145 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5146 struct intel_connector *connector = intel_dp->attached_connector; 5147 struct drm_connector_state *conn_state; 5148 struct intel_crtc_state *crtc_state; 5149 struct intel_crtc *crtc; 5150 int ret; 5151 5152 /* FIXME handle the MST connectors as well */ 5153 5154 if (!connector || connector->base.status != connector_status_connected) 5155 return 0; 5156 5157 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5158 ctx); 5159 if (ret) 5160 return ret; 5161 5162 conn_state = connector->base.state; 5163 5164 crtc = to_intel_crtc(conn_state->crtc); 5165 if (!crtc) 5166 return 0; 5167 5168 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5169 if (ret) 5170 return ret; 5171 5172 crtc_state = to_intel_crtc_state(crtc->base.state); 5173 5174 drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5175 5176 if (!crtc_state->hw.active) 5177 return 0; 5178 5179 if (conn_state->commit && 5180 !try_wait_for_completion(&conn_state->commit->hw_done)) 5181 return 0; 5182 5183 if (!intel_dp_needs_link_retrain(intel_dp)) 5184 return 0; 5185 5186 /* Suppress underruns caused by re-training */ 5187 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5188 if (crtc_state->has_pch_encoder) 5189 intel_set_pch_fifo_underrun_reporting(dev_priv, 5190 intel_crtc_pch_transcoder(crtc), false); 5191 5192 intel_dp_start_link_train(intel_dp); 5193 intel_dp_stop_link_train(intel_dp); 5194 5195 /* Keep underrun reporting disabled until things are stable */ 5196 intel_wait_for_vblank(dev_priv, crtc->pipe); 5197 5198 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5199 if (crtc_state->has_pch_encoder) 5200 intel_set_pch_fifo_underrun_reporting(dev_priv, 5201 intel_crtc_pch_transcoder(crtc), true); 5202 5203 return 0; 5204 } 5205 5206 /* 5207 * If display is now connected check links status, 5208 * there has been known issues of link loss triggering 5209 * long pulse. 5210 * 5211 * Some sinks (eg. ASUS PB287Q) seem to perform some 5212 * weird HPD ping pong during modesets. So we can apparently 5213 * end up with HPD going low during a modeset, and then 5214 * going back up soon after. And once that happens we must 5215 * retrain the link to get a picture. That's in case no 5216 * userspace component reacted to intermittent HPD dip. 
5217 */ 5218 static enum intel_hotplug_state 5219 intel_dp_hotplug(struct intel_encoder *encoder, 5220 struct intel_connector *connector, 5221 bool irq_received) 5222 { 5223 struct drm_modeset_acquire_ctx ctx; 5224 enum intel_hotplug_state state; 5225 int ret; 5226 5227 state = intel_encoder_hotplug(encoder, connector, irq_received); 5228 5229 drm_modeset_acquire_init(&ctx, 0); 5230 5231 for (;;) { 5232 ret = intel_dp_retrain_link(encoder, &ctx); 5233 5234 if (ret == -EDEADLK) { 5235 drm_modeset_backoff(&ctx); 5236 continue; 5237 } 5238 5239 break; 5240 } 5241 5242 drm_modeset_drop_locks(&ctx); 5243 drm_modeset_acquire_fini(&ctx); 5244 drm_WARN(encoder->base.dev, ret, 5245 "Acquiring modeset locks failed with %i\n", ret); 5246 5247 /* 5248 * Keeping it consistent with intel_ddi_hotplug() and 5249 * intel_hdmi_hotplug(). 5250 */ 5251 if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) 5252 state = INTEL_HOTPLUG_RETRY; 5253 5254 return state; 5255 } 5256 5257 static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 5258 { 5259 u8 val; 5260 5261 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5262 return; 5263 5264 if (drm_dp_dpcd_readb(&intel_dp->aux, 5265 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5266 return; 5267 5268 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5269 5270 if (val & DP_AUTOMATED_TEST_REQUEST) 5271 intel_dp_handle_test_request(intel_dp); 5272 5273 if (val & DP_CP_IRQ) 5274 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5275 5276 if (val & DP_SINK_SPECIFIC_IRQ) 5277 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); 5278 } 5279 5280 /* 5281 * According to DP spec 5282 * 5.1.2: 5283 * 1. Read DPCD 5284 * 2. Configure link according to Receiver Capabilities 5285 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5286 * 4. Check link status on receipt of hot-plug interrupt 5287 * 5288 * intel_dp_short_pulse - handles short pulse interrupts 5289 * when full detection is not required. 5290 * Returns %true if short pulse is handled and full detection 5291 * is NOT required and %false otherwise. 5292 */ 5293 static bool 5294 intel_dp_short_pulse(struct intel_dp *intel_dp) 5295 { 5296 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5297 u8 old_sink_count = intel_dp->sink_count; 5298 bool ret; 5299 5300 /* 5301 * Clearing compliance test variables to allow capturing 5302 * of values for next automated test request. 
5303 */ 5304 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5305 5306 /* 5307 * Now read the DPCD to see if it's actually running 5308 * If the current value of sink count doesn't match with 5309 * the value that was stored earlier or dpcd read failed 5310 * we need to do full detection 5311 */ 5312 ret = intel_dp_get_dpcd(intel_dp); 5313 5314 if ((old_sink_count != intel_dp->sink_count) || !ret) { 5315 /* No need to proceed if we are going to do full detect */ 5316 return false; 5317 } 5318 5319 intel_dp_check_service_irq(intel_dp); 5320 5321 /* Handle CEC interrupts, if any */ 5322 drm_dp_cec_irq(&intel_dp->aux); 5323 5324 /* defer to the hotplug work for link retraining if needed */ 5325 if (intel_dp_needs_link_retrain(intel_dp)) 5326 return false; 5327 5328 intel_psr_short_pulse(intel_dp); 5329 5330 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 5331 drm_dbg_kms(&dev_priv->drm, 5332 "Link Training Compliance Test requested\n"); 5333 /* Send a Hotplug Uevent to userspace to start modeset */ 5334 drm_kms_helper_hotplug_event(&dev_priv->drm); 5335 } 5336 5337 return true; 5338 } 5339 5340 /* XXX this is probably wrong for multiple downstream ports */ 5341 static enum drm_connector_status 5342 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 5343 { 5344 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 5345 u8 *dpcd = intel_dp->dpcd; 5346 u8 type; 5347 5348 if (WARN_ON(intel_dp_is_edp(intel_dp))) 5349 return connector_status_connected; 5350 5351 if (lspcon->active) 5352 lspcon_resume(lspcon); 5353 5354 if (!intel_dp_get_dpcd(intel_dp)) 5355 return connector_status_disconnected; 5356 5357 /* if there's no downstream port, we're done */ 5358 if (!drm_dp_is_branch(dpcd)) 5359 return connector_status_connected; 5360 5361 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 5362 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 5363 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 5364 5365 return intel_dp->sink_count ? 
5366 connector_status_connected : connector_status_disconnected; 5367 } 5368 5369 if (intel_dp_can_mst(intel_dp)) 5370 return connector_status_connected; 5371 5372 /* If no HPD, poke DDC gently */ 5373 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5374 return connector_status_connected; 5375 5376 /* Well we tried, say unknown for unreliable port types */ 5377 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5378 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5379 if (type == DP_DS_PORT_TYPE_VGA || 5380 type == DP_DS_PORT_TYPE_NON_EDID) 5381 return connector_status_unknown; 5382 } else { 5383 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 5384 DP_DWN_STRM_PORT_TYPE_MASK; 5385 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 5386 type == DP_DWN_STRM_PORT_TYPE_OTHER) 5387 return connector_status_unknown; 5388 } 5389 5390 /* Anything else is out of spec, warn and ignore */ 5391 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 5392 return connector_status_disconnected; 5393 } 5394 5395 static enum drm_connector_status 5396 edp_detect(struct intel_dp *intel_dp) 5397 { 5398 return connector_status_connected; 5399 } 5400 5401 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 5402 { 5403 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5404 u32 bit; 5405 5406 switch (encoder->hpd_pin) { 5407 case HPD_PORT_B: 5408 bit = SDE_PORTB_HOTPLUG; 5409 break; 5410 case HPD_PORT_C: 5411 bit = SDE_PORTC_HOTPLUG; 5412 break; 5413 case HPD_PORT_D: 5414 bit = SDE_PORTD_HOTPLUG; 5415 break; 5416 default: 5417 MISSING_CASE(encoder->hpd_pin); 5418 return false; 5419 } 5420 5421 return intel_de_read(dev_priv, SDEISR) & bit; 5422 } 5423 5424 static bool cpt_digital_port_connected(struct intel_encoder *encoder) 5425 { 5426 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5427 u32 bit; 5428 5429 switch (encoder->hpd_pin) { 5430 case HPD_PORT_B: 5431 bit = SDE_PORTB_HOTPLUG_CPT; 5432 break; 5433 case HPD_PORT_C: 5434 bit = SDE_PORTC_HOTPLUG_CPT; 5435 break; 5436 case HPD_PORT_D: 5437 bit = SDE_PORTD_HOTPLUG_CPT; 5438 break; 5439 default: 5440 MISSING_CASE(encoder->hpd_pin); 5441 return false; 5442 } 5443 5444 return intel_de_read(dev_priv, SDEISR) & bit; 5445 } 5446 5447 static bool spt_digital_port_connected(struct intel_encoder *encoder) 5448 { 5449 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5450 u32 bit; 5451 5452 switch (encoder->hpd_pin) { 5453 case HPD_PORT_A: 5454 bit = SDE_PORTA_HOTPLUG_SPT; 5455 break; 5456 case HPD_PORT_E: 5457 bit = SDE_PORTE_HOTPLUG_SPT; 5458 break; 5459 default: 5460 return cpt_digital_port_connected(encoder); 5461 } 5462 5463 return intel_de_read(dev_priv, SDEISR) & bit; 5464 } 5465 5466 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 5467 { 5468 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5469 u32 bit; 5470 5471 switch (encoder->hpd_pin) { 5472 case HPD_PORT_B: 5473 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 5474 break; 5475 case HPD_PORT_C: 5476 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 5477 break; 5478 case HPD_PORT_D: 5479 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 5480 break; 5481 default: 5482 MISSING_CASE(encoder->hpd_pin); 5483 return false; 5484 } 5485 5486 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 5487 } 5488 5489 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 5490 { 5491 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5492 u32 bit; 5493 5494 switch (encoder->hpd_pin) { 5495 case HPD_PORT_B: 5496 bit = 
PORTB_HOTPLUG_LIVE_STATUS_GM45; 5497 break; 5498 case HPD_PORT_C: 5499 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 5500 break; 5501 case HPD_PORT_D: 5502 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 5503 break; 5504 default: 5505 MISSING_CASE(encoder->hpd_pin); 5506 return false; 5507 } 5508 5509 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 5510 } 5511 5512 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 5513 { 5514 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5515 5516 if (encoder->hpd_pin == HPD_PORT_A) 5517 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG; 5518 else 5519 return ibx_digital_port_connected(encoder); 5520 } 5521 5522 static bool snb_digital_port_connected(struct intel_encoder *encoder) 5523 { 5524 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5525 5526 if (encoder->hpd_pin == HPD_PORT_A) 5527 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG; 5528 else 5529 return cpt_digital_port_connected(encoder); 5530 } 5531 5532 static bool ivb_digital_port_connected(struct intel_encoder *encoder) 5533 { 5534 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5535 5536 if (encoder->hpd_pin == HPD_PORT_A) 5537 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB; 5538 else 5539 return cpt_digital_port_connected(encoder); 5540 } 5541 5542 static bool bdw_digital_port_connected(struct intel_encoder *encoder) 5543 { 5544 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5545 5546 if (encoder->hpd_pin == HPD_PORT_A) 5547 return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; 5548 else 5549 return cpt_digital_port_connected(encoder); 5550 } 5551 5552 static bool bxt_digital_port_connected(struct intel_encoder *encoder) 5553 { 5554 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5555 u32 bit; 5556 5557 switch (encoder->hpd_pin) { 5558 case HPD_PORT_A: 5559 bit = BXT_DE_PORT_HP_DDIA; 5560 break; 5561 case HPD_PORT_B: 5562 bit = BXT_DE_PORT_HP_DDIB; 5563 break; 5564 case HPD_PORT_C: 5565 bit = BXT_DE_PORT_HP_DDIC; 5566 break; 5567 default: 5568 MISSING_CASE(encoder->hpd_pin); 5569 return false; 5570 } 5571 5572 return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit; 5573 } 5574 5575 static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv, 5576 enum phy phy) 5577 { 5578 if (HAS_PCH_MCC(dev_priv) && phy == PHY_C) 5579 return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1); 5580 5581 return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy); 5582 } 5583 5584 static bool icp_digital_port_connected(struct intel_encoder *encoder) 5585 { 5586 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5587 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5588 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 5589 5590 if (intel_phy_is_combo(dev_priv, phy)) 5591 return intel_combo_phy_connected(dev_priv, phy); 5592 else if (intel_phy_is_tc(dev_priv, phy)) 5593 return intel_tc_port_connected(dig_port); 5594 else 5595 MISSING_CASE(encoder->hpd_pin); 5596 5597 return false; 5598 } 5599 5600 /* 5601 * intel_digital_port_connected - is the specified port connected? 5602 * @encoder: intel_encoder 5603 * 5604 * In cases where there's a connector physically connected but it can't be used 5605 * by our hardware we also return false, since the rest of the driver should 5606 * pretty much treat the port as disconnected. This is relevant for type-C 5607 * (starting on ICL) where there's ownership involved. 
5608 * 5609 * Return %true if port is connected, %false otherwise. 5610 */ 5611 static bool __intel_digital_port_connected(struct intel_encoder *encoder) 5612 { 5613 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5614 5615 if (HAS_GMCH(dev_priv)) { 5616 if (IS_GM45(dev_priv)) 5617 return gm45_digital_port_connected(encoder); 5618 else 5619 return g4x_digital_port_connected(encoder); 5620 } 5621 5622 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 5623 return icp_digital_port_connected(encoder); 5624 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 5625 return spt_digital_port_connected(encoder); 5626 else if (IS_GEN9_LP(dev_priv)) 5627 return bxt_digital_port_connected(encoder); 5628 else if (IS_GEN(dev_priv, 8)) 5629 return bdw_digital_port_connected(encoder); 5630 else if (IS_GEN(dev_priv, 7)) 5631 return ivb_digital_port_connected(encoder); 5632 else if (IS_GEN(dev_priv, 6)) 5633 return snb_digital_port_connected(encoder); 5634 else if (IS_GEN(dev_priv, 5)) 5635 return ilk_digital_port_connected(encoder); 5636 5637 MISSING_CASE(INTEL_GEN(dev_priv)); 5638 return false; 5639 } 5640 5641 bool intel_digital_port_connected(struct intel_encoder *encoder) 5642 { 5643 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5644 bool is_connected = false; 5645 intel_wakeref_t wakeref; 5646 5647 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 5648 is_connected = __intel_digital_port_connected(encoder); 5649 5650 return is_connected; 5651 } 5652 5653 static struct edid * 5654 intel_dp_get_edid(struct intel_dp *intel_dp) 5655 { 5656 struct intel_connector *intel_connector = intel_dp->attached_connector; 5657 5658 /* use cached edid if we have one */ 5659 if (intel_connector->edid) { 5660 /* invalid edid */ 5661 if (IS_ERR(intel_connector->edid)) 5662 return NULL; 5663 5664 return drm_edid_duplicate(intel_connector->edid); 5665 } else 5666 return drm_get_edid(&intel_connector->base, 5667 &intel_dp->aux.ddc); 5668 } 5669 5670 static void 5671 intel_dp_set_edid(struct intel_dp *intel_dp) 5672 { 5673 struct intel_connector *intel_connector = intel_dp->attached_connector; 5674 struct edid *edid; 5675 5676 intel_dp_unset_edid(intel_dp); 5677 edid = intel_dp_get_edid(intel_dp); 5678 intel_connector->detect_edid = edid; 5679 5680 intel_dp->has_audio = drm_detect_monitor_audio(edid); 5681 drm_dp_cec_set_edid(&intel_dp->aux, edid); 5682 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 5683 } 5684 5685 static void 5686 intel_dp_unset_edid(struct intel_dp *intel_dp) 5687 { 5688 struct intel_connector *intel_connector = intel_dp->attached_connector; 5689 5690 drm_dp_cec_unset_edid(&intel_dp->aux); 5691 kfree(intel_connector->detect_edid); 5692 intel_connector->detect_edid = NULL; 5693 5694 intel_dp->has_audio = false; 5695 intel_dp->edid_quirks = 0; 5696 } 5697 5698 static int 5699 intel_dp_detect(struct drm_connector *connector, 5700 struct drm_modeset_acquire_ctx *ctx, 5701 bool force) 5702 { 5703 struct drm_i915_private *dev_priv = to_i915(connector->dev); 5704 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5705 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5706 struct intel_encoder *encoder = &dig_port->base; 5707 enum drm_connector_status status; 5708 5709 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 5710 connector->base.id, connector->name); 5711 drm_WARN_ON(&dev_priv->drm, 5712 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 5713 5714 /* Can't disconnect eDP */ 5715 if 
(intel_dp_is_edp(intel_dp)) 5716 status = edp_detect(intel_dp); 5717 else if (intel_digital_port_connected(encoder)) 5718 status = intel_dp_detect_dpcd(intel_dp); 5719 else 5720 status = connector_status_disconnected; 5721 5722 if (status == connector_status_disconnected) { 5723 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5724 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 5725 5726 if (intel_dp->is_mst) { 5727 drm_dbg_kms(&dev_priv->drm, 5728 "MST device may have disappeared %d vs %d\n", 5729 intel_dp->is_mst, 5730 intel_dp->mst_mgr.mst_state); 5731 intel_dp->is_mst = false; 5732 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5733 intel_dp->is_mst); 5734 } 5735 5736 goto out; 5737 } 5738 5739 if (intel_dp->reset_link_params) { 5740 /* Initial max link lane count */ 5741 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 5742 5743 /* Initial max link rate */ 5744 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 5745 5746 intel_dp->reset_link_params = false; 5747 } 5748 5749 intel_dp_print_rates(intel_dp); 5750 5751 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 5752 if (INTEL_GEN(dev_priv) >= 11) 5753 intel_dp_get_dsc_sink_cap(intel_dp); 5754 5755 intel_dp_configure_mst(intel_dp); 5756 5757 if (intel_dp->is_mst) { 5758 /* 5759 * If we are in MST mode then this connector 5760 * won't appear connected or have anything 5761 * with EDID on it 5762 */ 5763 status = connector_status_disconnected; 5764 goto out; 5765 } 5766 5767 /* 5768 * Some external monitors do not signal loss of link synchronization 5769 * with an IRQ_HPD, so force a link status check. 5770 */ 5771 if (!intel_dp_is_edp(intel_dp)) { 5772 int ret; 5773 5774 ret = intel_dp_retrain_link(encoder, ctx); 5775 if (ret) 5776 return ret; 5777 } 5778 5779 /* 5780 * Clearing NACK and defer counts to get their exact values 5781 * while reading EDID which are required by Compliance tests 5782 * 4.2.2.4 and 4.2.2.5 5783 */ 5784 intel_dp->aux.i2c_nack_count = 0; 5785 intel_dp->aux.i2c_defer_count = 0; 5786 5787 intel_dp_set_edid(intel_dp); 5788 if (intel_dp_is_edp(intel_dp) || 5789 to_intel_connector(connector)->detect_edid) 5790 status = connector_status_connected; 5791 5792 intel_dp_check_service_irq(intel_dp); 5793 5794 out: 5795 if (status != connector_status_connected && !intel_dp->is_mst) 5796 intel_dp_unset_edid(intel_dp); 5797 5798 /* 5799 * Make sure the refs for power wells enabled during detect are 5800 * dropped to avoid a new detect cycle triggered by HPD polling. 
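 * intel_display_power_flush_work() below flushes the queued asynchronous
 * power puts, so any wakeref taken for the AUX transfers or EDID reads done
 * during this detect is released before we return.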
5801 */ 5802 intel_display_power_flush_work(dev_priv); 5803 5804 return status; 5805 } 5806 5807 static void 5808 intel_dp_force(struct drm_connector *connector) 5809 { 5810 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5811 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5812 struct intel_encoder *intel_encoder = &dig_port->base; 5813 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 5814 enum intel_display_power_domain aux_domain = 5815 intel_aux_power_domain(dig_port); 5816 intel_wakeref_t wakeref; 5817 5818 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 5819 connector->base.id, connector->name); 5820 intel_dp_unset_edid(intel_dp); 5821 5822 if (connector->status != connector_status_connected) 5823 return; 5824 5825 wakeref = intel_display_power_get(dev_priv, aux_domain); 5826 5827 intel_dp_set_edid(intel_dp); 5828 5829 intel_display_power_put(dev_priv, aux_domain, wakeref); 5830 } 5831 5832 static int intel_dp_get_modes(struct drm_connector *connector) 5833 { 5834 struct intel_connector *intel_connector = to_intel_connector(connector); 5835 struct edid *edid; 5836 5837 edid = intel_connector->detect_edid; 5838 if (edid) { 5839 int ret = intel_connector_update_modes(connector, edid); 5840 if (ret) 5841 return ret; 5842 } 5843 5844 /* if eDP has no EDID, fall back to fixed mode */ 5845 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 5846 intel_connector->panel.fixed_mode) { 5847 struct drm_display_mode *mode; 5848 5849 mode = drm_mode_duplicate(connector->dev, 5850 intel_connector->panel.fixed_mode); 5851 if (mode) { 5852 drm_mode_probed_add(connector, mode); 5853 return 1; 5854 } 5855 } 5856 5857 return 0; 5858 } 5859 5860 static int 5861 intel_dp_connector_register(struct drm_connector *connector) 5862 { 5863 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5864 int ret; 5865 5866 ret = intel_connector_register(connector); 5867 if (ret) 5868 return ret; 5869 5870 intel_connector_debugfs_add(connector); 5871 5872 DRM_DEBUG_KMS("registering %s bus for %s\n", 5873 intel_dp->aux.name, connector->kdev->kobj.name); 5874 5875 intel_dp->aux.dev = connector->kdev; 5876 ret = drm_dp_aux_register(&intel_dp->aux); 5877 if (!ret) 5878 drm_dp_cec_register_connector(&intel_dp->aux, connector); 5879 return ret; 5880 } 5881 5882 static void 5883 intel_dp_connector_unregister(struct drm_connector *connector) 5884 { 5885 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5886 5887 drm_dp_cec_unregister_connector(&intel_dp->aux); 5888 drm_dp_aux_unregister(&intel_dp->aux); 5889 intel_connector_unregister(connector); 5890 } 5891 5892 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 5893 { 5894 struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 5895 struct intel_dp *intel_dp = &intel_dig_port->dp; 5896 5897 intel_dp_mst_encoder_cleanup(intel_dig_port); 5898 if (intel_dp_is_edp(intel_dp)) { 5899 intel_wakeref_t wakeref; 5900 5901 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5902 /* 5903 * vdd might still be enabled do to the delayed vdd off. 5904 * Make sure vdd is actually turned off here. 
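 * The delayed vdd off work was cancelled above, so grab the pps lock and run
 * the synchronous off path to drop any VDD reference that is still held.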
5905 */ 5906 with_pps_lock(intel_dp, wakeref) 5907 edp_panel_vdd_off_sync(intel_dp); 5908 5909 if (intel_dp->edp_notifier.notifier_call) { 5910 unregister_reboot_notifier(&intel_dp->edp_notifier); 5911 intel_dp->edp_notifier.notifier_call = NULL; 5912 } 5913 } 5914 5915 intel_dp_aux_fini(intel_dp); 5916 } 5917 5918 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 5919 { 5920 intel_dp_encoder_flush_work(encoder); 5921 5922 drm_encoder_cleanup(encoder); 5923 kfree(enc_to_dig_port(to_intel_encoder(encoder))); 5924 } 5925 5926 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 5927 { 5928 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 5929 intel_wakeref_t wakeref; 5930 5931 if (!intel_dp_is_edp(intel_dp)) 5932 return; 5933 5934 /* 5935 * vdd might still be enabled do to the delayed vdd off. 5936 * Make sure vdd is actually turned off here. 5937 */ 5938 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5939 with_pps_lock(intel_dp, wakeref) 5940 edp_panel_vdd_off_sync(intel_dp); 5941 } 5942 5943 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) 5944 { 5945 long ret; 5946 5947 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) 5948 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, 5949 msecs_to_jiffies(timeout)); 5950 5951 if (!ret) 5952 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); 5953 } 5954 5955 static 5956 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, 5957 u8 *an) 5958 { 5959 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); 5960 static const struct drm_dp_aux_msg msg = { 5961 .request = DP_AUX_NATIVE_WRITE, 5962 .address = DP_AUX_HDCP_AKSV, 5963 .size = DRM_HDCP_KSV_LEN, 5964 }; 5965 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; 5966 ssize_t dpcd_ret; 5967 int ret; 5968 5969 /* Output An first, that's easy */ 5970 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, 5971 an, DRM_HDCP_AN_LEN); 5972 if (dpcd_ret != DRM_HDCP_AN_LEN) { 5973 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n", 5974 dpcd_ret); 5975 return dpcd_ret >= 0 ? -EIO : dpcd_ret; 5976 } 5977 5978 /* 5979 * Since Aksv is Oh-So-Secret, we can't access it in software. So in 5980 * order to get it on the wire, we need to create the AUX header as if 5981 * we were writing the data, and then tickle the hardware to output the 5982 * data once the header is sent out. 5983 */ 5984 intel_dp_aux_header(txbuf, &msg); 5985 5986 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 5987 rxbuf, sizeof(rxbuf), 5988 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 5989 if (ret < 0) { 5990 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret); 5991 return ret; 5992 } else if (ret == 0) { 5993 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n"); 5994 return -EIO; 5995 } 5996 5997 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 5998 if (reply != DP_AUX_NATIVE_REPLY_ACK) { 5999 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", 6000 reply); 6001 return -EIO; 6002 } 6003 return 0; 6004 } 6005 6006 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, 6007 u8 *bksv) 6008 { 6009 ssize_t ret; 6010 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 6011 DRM_HDCP_KSV_LEN); 6012 if (ret != DRM_HDCP_KSV_LEN) { 6013 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret); 6014 return ret >= 0 ? 
-EIO : ret; 6015 } 6016 return 0; 6017 } 6018 6019 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, 6020 u8 *bstatus) 6021 { 6022 ssize_t ret; 6023 /* 6024 * For some reason the HDMI and DP HDCP specs call this register 6025 * definition by different names. In the HDMI spec, it's called BSTATUS, 6026 * but in DP it's called BINFO. 6027 */ 6028 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, 6029 bstatus, DRM_HDCP_BSTATUS_LEN); 6030 if (ret != DRM_HDCP_BSTATUS_LEN) { 6031 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6032 return ret >= 0 ? -EIO : ret; 6033 } 6034 return 0; 6035 } 6036 6037 static 6038 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, 6039 u8 *bcaps) 6040 { 6041 ssize_t ret; 6042 6043 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6044 bcaps, 1); 6045 if (ret != 1) { 6046 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret); 6047 return ret >= 0 ? -EIO : ret; 6048 } 6049 6050 return 0; 6051 } 6052 6053 static 6054 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, 6055 bool *repeater_present) 6056 { 6057 ssize_t ret; 6058 u8 bcaps; 6059 6060 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6061 if (ret) 6062 return ret; 6063 6064 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6065 return 0; 6066 } 6067 6068 static 6069 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, 6070 u8 *ri_prime) 6071 { 6072 ssize_t ret; 6073 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6074 ri_prime, DRM_HDCP_RI_LEN); 6075 if (ret != DRM_HDCP_RI_LEN) { 6076 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret); 6077 return ret >= 0 ? -EIO : ret; 6078 } 6079 return 0; 6080 } 6081 6082 static 6083 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, 6084 bool *ksv_ready) 6085 { 6086 ssize_t ret; 6087 u8 bstatus; 6088 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6089 &bstatus, 1); 6090 if (ret != 1) { 6091 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6092 return ret >= 0 ? -EIO : ret; 6093 } 6094 *ksv_ready = bstatus & DP_BSTATUS_READY; 6095 return 0; 6096 } 6097 6098 static 6099 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, 6100 int num_downstream, u8 *ksv_fifo) 6101 { 6102 ssize_t ret; 6103 int i; 6104 6105 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6106 for (i = 0; i < num_downstream; i += 3) { 6107 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6108 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6109 DP_AUX_HDCP_KSV_FIFO, 6110 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6111 len); 6112 if (ret != len) { 6113 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n", 6114 i, ret); 6115 return ret >= 0 ? -EIO : ret; 6116 } 6117 } 6118 return 0; 6119 } 6120 6121 static 6122 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, 6123 int i, u32 *part) 6124 { 6125 ssize_t ret; 6126 6127 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6128 return -EINVAL; 6129 6130 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6131 DP_AUX_HDCP_V_PRIME(i), part, 6132 DRM_HDCP_V_PRIME_PART_LEN); 6133 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6134 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6135 return ret >= 0 ? 
-EIO : ret; 6136 } 6137 return 0; 6138 } 6139 6140 static 6141 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, 6142 bool enable) 6143 { 6144 /* Not used for single stream DisplayPort setups */ 6145 return 0; 6146 } 6147 6148 static 6149 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) 6150 { 6151 ssize_t ret; 6152 u8 bstatus; 6153 6154 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6155 &bstatus, 1); 6156 if (ret != 1) { 6157 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6158 return false; 6159 } 6160 6161 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6162 } 6163 6164 static 6165 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, 6166 bool *hdcp_capable) 6167 { 6168 ssize_t ret; 6169 u8 bcaps; 6170 6171 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6172 if (ret) 6173 return ret; 6174 6175 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6176 return 0; 6177 } 6178 6179 struct hdcp2_dp_errata_stream_type { 6180 u8 msg_id; 6181 u8 stream_type; 6182 } __packed; 6183 6184 struct hdcp2_dp_msg_data { 6185 u8 msg_id; 6186 u32 offset; 6187 bool msg_detectable; 6188 u32 timeout; 6189 u32 timeout2; /* Added for non_paired situation */ 6190 }; 6191 6192 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6193 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6194 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6195 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6196 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6197 false, 0, 0 }, 6198 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6199 false, 0, 0 }, 6200 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6201 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6202 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6203 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6204 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6205 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6206 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6207 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6208 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6209 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6210 0, 0 }, 6211 { HDCP_2_2_REP_SEND_RECVID_LIST, 6212 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6213 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6214 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6215 0, 0 }, 6216 { HDCP_2_2_REP_STREAM_MANAGE, 6217 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6218 0, 0 }, 6219 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6220 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6221 /* local define to shovel this through the write_2_2 interface */ 6222 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6223 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6224 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6225 0, 0 }, 6226 }; 6227 6228 static inline 6229 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, 6230 u8 *rx_status) 6231 { 6232 ssize_t ret; 6233 6234 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6235 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6236 HDCP_2_2_DP_RXSTATUS_LEN); 6237 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6238 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6239 return ret >= 0 ? 
-EIO : ret; 6240 } 6241 6242 return 0; 6243 } 6244 6245 static 6246 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, 6247 u8 msg_id, bool *msg_ready) 6248 { 6249 u8 rx_status; 6250 int ret; 6251 6252 *msg_ready = false; 6253 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6254 if (ret < 0) 6255 return ret; 6256 6257 switch (msg_id) { 6258 case HDCP_2_2_AKE_SEND_HPRIME: 6259 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) 6260 *msg_ready = true; 6261 break; 6262 case HDCP_2_2_AKE_SEND_PAIRING_INFO: 6263 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) 6264 *msg_ready = true; 6265 break; 6266 case HDCP_2_2_REP_SEND_RECVID_LIST: 6267 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6268 *msg_ready = true; 6269 break; 6270 default: 6271 DRM_ERROR("Unidentified msg_id: %d\n", msg_id); 6272 return -EINVAL; 6273 } 6274 6275 return 0; 6276 } 6277 6278 static ssize_t 6279 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, 6280 const struct hdcp2_dp_msg_data *hdcp2_msg_data) 6281 { 6282 struct intel_dp *dp = &intel_dig_port->dp; 6283 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6284 u8 msg_id = hdcp2_msg_data->msg_id; 6285 int ret, timeout; 6286 bool msg_ready = false; 6287 6288 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) 6289 timeout = hdcp2_msg_data->timeout2; 6290 else 6291 timeout = hdcp2_msg_data->timeout; 6292 6293 /* 6294 * There is no way to detect the CERT, LPRIME and STREAM_READY 6295 * availability. So Wait for timeout and read the msg. 6296 */ 6297 if (!hdcp2_msg_data->msg_detectable) { 6298 mdelay(timeout); 6299 ret = 0; 6300 } else { 6301 /* 6302 * As we want to check the msg availability at timeout, Ignoring 6303 * the timeout at wait for CP_IRQ. 6304 */ 6305 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); 6306 ret = hdcp2_detect_msg_availability(intel_dig_port, 6307 msg_id, &msg_ready); 6308 if (!msg_ready) 6309 ret = -ETIMEDOUT; 6310 } 6311 6312 if (ret) 6313 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n", 6314 hdcp2_msg_data->msg_id, ret, timeout); 6315 6316 return ret; 6317 } 6318 6319 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) 6320 { 6321 int i; 6322 6323 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) 6324 if (hdcp2_dp_msg_data[i].msg_id == msg_id) 6325 return &hdcp2_dp_msg_data[i]; 6326 6327 return NULL; 6328 } 6329 6330 static 6331 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, 6332 void *buf, size_t size) 6333 { 6334 struct intel_dp *dp = &intel_dig_port->dp; 6335 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6336 unsigned int offset; 6337 u8 *byte = buf; 6338 ssize_t ret, bytes_to_write, len; 6339 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6340 6341 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); 6342 if (!hdcp2_msg_data) 6343 return -EINVAL; 6344 6345 offset = hdcp2_msg_data->offset; 6346 6347 /* No msg_id in DP HDCP2.2 msgs */ 6348 bytes_to_write = size - 1; 6349 byte++; 6350 6351 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); 6352 6353 while (bytes_to_write) { 6354 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? 
6355 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; 6356 6357 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, 6358 offset, (void *)byte, len); 6359 if (ret < 0) 6360 return ret; 6361 6362 bytes_to_write -= ret; 6363 byte += ret; 6364 offset += ret; 6365 } 6366 6367 return size; 6368 } 6369 6370 static 6371 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) 6372 { 6373 u8 rx_info[HDCP_2_2_RXINFO_LEN]; 6374 u32 dev_cnt; 6375 ssize_t ret; 6376 6377 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6378 DP_HDCP_2_2_REG_RXINFO_OFFSET, 6379 (void *)rx_info, HDCP_2_2_RXINFO_LEN); 6380 if (ret != HDCP_2_2_RXINFO_LEN) 6381 return ret >= 0 ? -EIO : ret; 6382 6383 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 6384 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 6385 6386 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) 6387 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; 6388 6389 ret = sizeof(struct hdcp2_rep_send_receiverid_list) - 6390 HDCP_2_2_RECEIVER_IDS_MAX_LEN + 6391 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); 6392 6393 return ret; 6394 } 6395 6396 static 6397 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, 6398 u8 msg_id, void *buf, size_t size) 6399 { 6400 unsigned int offset; 6401 u8 *byte = buf; 6402 ssize_t ret, bytes_to_recv, len; 6403 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6404 6405 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); 6406 if (!hdcp2_msg_data) 6407 return -EINVAL; 6408 offset = hdcp2_msg_data->offset; 6409 6410 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); 6411 if (ret < 0) 6412 return ret; 6413 6414 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { 6415 ret = get_receiver_id_list_size(intel_dig_port); 6416 if (ret < 0) 6417 return ret; 6418 6419 size = ret; 6420 } 6421 bytes_to_recv = size - 1; 6422 6423 /* DP adaptation msgs has no msg_id */ 6424 byte++; 6425 6426 while (bytes_to_recv) { 6427 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 6428 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; 6429 6430 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, 6431 (void *)byte, len); 6432 if (ret < 0) { 6433 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret); 6434 return ret; 6435 } 6436 6437 bytes_to_recv -= ret; 6438 byte += ret; 6439 offset += ret; 6440 } 6441 byte = buf; 6442 *byte = msg_id; 6443 6444 return size; 6445 } 6446 6447 static 6448 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, 6449 bool is_repeater, u8 content_type) 6450 { 6451 int ret; 6452 struct hdcp2_dp_errata_stream_type stream_type_msg; 6453 6454 if (is_repeater) 6455 return 0; 6456 6457 /* 6458 * Errata for DP: As Stream type is used for encryption, Receiver 6459 * should be communicated with stream type for the decryption of the 6460 * content. 6461 * Repeater will be communicated with stream type as a part of it's 6462 * auth later in time. 6463 */ 6464 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; 6465 stream_type_msg.stream_type = content_type; 6466 6467 ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, 6468 sizeof(stream_type_msg)); 6469 6470 return ret < 0 ? 
ret : 0; 6471 6472 } 6473 6474 static 6475 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) 6476 { 6477 u8 rx_status; 6478 int ret; 6479 6480 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6481 if (ret) 6482 return ret; 6483 6484 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) 6485 ret = HDCP_REAUTH_REQUEST; 6486 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) 6487 ret = HDCP_LINK_INTEGRITY_FAILURE; 6488 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6489 ret = HDCP_TOPOLOGY_CHANGE; 6490 6491 return ret; 6492 } 6493 6494 static 6495 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, 6496 bool *capable) 6497 { 6498 u8 rx_caps[3]; 6499 int ret; 6500 6501 *capable = false; 6502 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6503 DP_HDCP_2_2_REG_RX_CAPS_OFFSET, 6504 rx_caps, HDCP_2_2_RXCAPS_LEN); 6505 if (ret != HDCP_2_2_RXCAPS_LEN) 6506 return ret >= 0 ? -EIO : ret; 6507 6508 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6509 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6510 *capable = true; 6511 6512 return 0; 6513 } 6514 6515 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 6516 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 6517 .read_bksv = intel_dp_hdcp_read_bksv, 6518 .read_bstatus = intel_dp_hdcp_read_bstatus, 6519 .repeater_present = intel_dp_hdcp_repeater_present, 6520 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 6521 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 6522 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 6523 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 6524 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 6525 .check_link = intel_dp_hdcp_check_link, 6526 .hdcp_capable = intel_dp_hdcp_capable, 6527 .write_2_2_msg = intel_dp_hdcp2_write_msg, 6528 .read_2_2_msg = intel_dp_hdcp2_read_msg, 6529 .config_stream_type = intel_dp_hdcp2_config_stream_type, 6530 .check_2_2_link = intel_dp_hdcp2_check_link, 6531 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 6532 .protocol = HDCP_PROTOCOL_DP, 6533 }; 6534 6535 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 6536 { 6537 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6538 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6539 6540 lockdep_assert_held(&dev_priv->pps_mutex); 6541 6542 if (!edp_have_panel_vdd(intel_dp)) 6543 return; 6544 6545 /* 6546 * The VDD bit needs a power domain reference, so if the bit is 6547 * already enabled when we boot or resume, grab this reference and 6548 * schedule a vdd off, so we don't hold on to the reference 6549 * indefinitely. 
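 * Taking the reference here keeps the power domain tracking in sync with the
 * hardware state; the vdd off work scheduled below drops it again once it
 * runs.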
6550 */ 6551 drm_dbg_kms(&dev_priv->drm, 6552 "VDD left on by BIOS, adjusting state tracking\n"); 6553 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 6554 6555 edp_panel_vdd_schedule_off(intel_dp); 6556 } 6557 6558 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 6559 { 6560 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6561 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6562 enum pipe pipe; 6563 6564 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 6565 encoder->port, &pipe)) 6566 return pipe; 6567 6568 return INVALID_PIPE; 6569 } 6570 6571 void intel_dp_encoder_reset(struct drm_encoder *encoder) 6572 { 6573 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 6574 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 6575 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 6576 intel_wakeref_t wakeref; 6577 6578 if (!HAS_DDI(dev_priv)) 6579 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 6580 6581 if (lspcon->active) 6582 lspcon_resume(lspcon); 6583 6584 intel_dp->reset_link_params = true; 6585 6586 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 6587 !intel_dp_is_edp(intel_dp)) 6588 return; 6589 6590 with_pps_lock(intel_dp, wakeref) { 6591 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6592 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 6593 6594 if (intel_dp_is_edp(intel_dp)) { 6595 /* 6596 * Reinit the power sequencer, in case BIOS did 6597 * something nasty with it. 6598 */ 6599 intel_dp_pps_init(intel_dp); 6600 intel_edp_panel_vdd_sanitize(intel_dp); 6601 } 6602 } 6603 } 6604 6605 static int intel_modeset_tile_group(struct intel_atomic_state *state, 6606 int tile_group_id) 6607 { 6608 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6609 struct drm_connector_list_iter conn_iter; 6610 struct drm_connector *connector; 6611 int ret = 0; 6612 6613 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 6614 drm_for_each_connector_iter(connector, &conn_iter) { 6615 struct drm_connector_state *conn_state; 6616 struct intel_crtc_state *crtc_state; 6617 struct intel_crtc *crtc; 6618 6619 if (!connector->has_tile || 6620 connector->tile_group->id != tile_group_id) 6621 continue; 6622 6623 conn_state = drm_atomic_get_connector_state(&state->base, 6624 connector); 6625 if (IS_ERR(conn_state)) { 6626 ret = PTR_ERR(conn_state); 6627 break; 6628 } 6629 6630 crtc = to_intel_crtc(conn_state->crtc); 6631 6632 if (!crtc) 6633 continue; 6634 6635 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6636 crtc_state->uapi.mode_changed = true; 6637 6638 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6639 if (ret) 6640 break; 6641 } 6642 drm_connector_list_iter_end(&conn_iter); 6643 6644 return ret; 6645 } 6646 6647 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 6648 { 6649 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6650 struct intel_crtc *crtc; 6651 6652 if (transcoders == 0) 6653 return 0; 6654 6655 for_each_intel_crtc(&dev_priv->drm, crtc) { 6656 struct intel_crtc_state *crtc_state; 6657 int ret; 6658 6659 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6660 if (IS_ERR(crtc_state)) 6661 return PTR_ERR(crtc_state); 6662 6663 if (!crtc_state->hw.enable) 6664 continue; 6665 6666 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 6667 continue; 6668 6669 crtc_state->uapi.mode_changed = true; 6670 6671 ret = 
drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6672 if (ret) 6673 return ret; 6674 6675 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6676 if (ret) 6677 return ret; 6678 6679 transcoders &= ~BIT(crtc_state->cpu_transcoder); 6680 } 6681 6682 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 6683 6684 return 0; 6685 } 6686 6687 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 6688 struct drm_connector *connector) 6689 { 6690 const struct drm_connector_state *old_conn_state = 6691 drm_atomic_get_old_connector_state(&state->base, connector); 6692 const struct intel_crtc_state *old_crtc_state; 6693 struct intel_crtc *crtc; 6694 u8 transcoders; 6695 6696 crtc = to_intel_crtc(old_conn_state->crtc); 6697 if (!crtc) 6698 return 0; 6699 6700 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6701 6702 if (!old_crtc_state->hw.active) 6703 return 0; 6704 6705 transcoders = old_crtc_state->sync_mode_slaves_mask; 6706 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 6707 transcoders |= BIT(old_crtc_state->master_transcoder); 6708 6709 return intel_modeset_affected_transcoders(state, 6710 transcoders); 6711 } 6712 6713 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 6714 struct drm_atomic_state *_state) 6715 { 6716 struct drm_i915_private *dev_priv = to_i915(conn->dev); 6717 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6718 int ret; 6719 6720 ret = intel_digital_connector_atomic_check(conn, &state->base); 6721 if (ret) 6722 return ret; 6723 6724 if (INTEL_GEN(dev_priv) < 11) 6725 return 0; 6726 6727 if (!intel_connector_needs_modeset(state, conn)) 6728 return 0; 6729 6730 if (conn->has_tile) { 6731 ret = intel_modeset_tile_group(state, conn->tile_group->id); 6732 if (ret) 6733 return ret; 6734 } 6735 6736 return intel_modeset_synced_crtcs(state, conn); 6737 } 6738 6739 static const struct drm_connector_funcs intel_dp_connector_funcs = { 6740 .force = intel_dp_force, 6741 .fill_modes = drm_helper_probe_single_connector_modes, 6742 .atomic_get_property = intel_digital_connector_atomic_get_property, 6743 .atomic_set_property = intel_digital_connector_atomic_set_property, 6744 .late_register = intel_dp_connector_register, 6745 .early_unregister = intel_dp_connector_unregister, 6746 .destroy = intel_connector_destroy, 6747 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6748 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 6749 }; 6750 6751 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 6752 .detect_ctx = intel_dp_detect, 6753 .get_modes = intel_dp_get_modes, 6754 .mode_valid = intel_dp_mode_valid, 6755 .atomic_check = intel_dp_connector_atomic_check, 6756 }; 6757 6758 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 6759 .reset = intel_dp_encoder_reset, 6760 .destroy = intel_dp_encoder_destroy, 6761 }; 6762 6763 enum irqreturn 6764 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 6765 { 6766 struct intel_dp *intel_dp = &intel_dig_port->dp; 6767 6768 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { 6769 /* 6770 * vdd off can generate a long pulse on eDP which 6771 * would require vdd on to handle it, and thus we 6772 * would end up in an endless cycle of 6773 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." 
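 * Returning IRQ_HANDLED below breaks that cycle: the pulse is acknowledged
 * without kicking off another detect for the always-connected eDP panel.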
6774 */ 6775 DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n", 6776 intel_dig_port->base.base.base.id, 6777 intel_dig_port->base.base.name); 6778 return IRQ_HANDLED; 6779 } 6780 6781 DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n", 6782 intel_dig_port->base.base.base.id, 6783 intel_dig_port->base.base.name, 6784 long_hpd ? "long" : "short"); 6785 6786 if (long_hpd) { 6787 intel_dp->reset_link_params = true; 6788 return IRQ_NONE; 6789 } 6790 6791 if (intel_dp->is_mst) { 6792 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { 6793 /* 6794 * If we were in MST mode, and device is not 6795 * there, get out of MST mode 6796 */ 6797 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", 6798 intel_dp->is_mst, intel_dp->mst_mgr.mst_state); 6799 intel_dp->is_mst = false; 6800 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6801 intel_dp->is_mst); 6802 6803 return IRQ_NONE; 6804 } 6805 } 6806 6807 if (!intel_dp->is_mst) { 6808 bool handled; 6809 6810 handled = intel_dp_short_pulse(intel_dp); 6811 6812 if (!handled) 6813 return IRQ_NONE; 6814 } 6815 6816 return IRQ_HANDLED; 6817 } 6818 6819 /* check the VBT to see whether the eDP is on another port */ 6820 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 6821 { 6822 /* 6823 * eDP not supported on g4x. so bail out early just 6824 * for a bit extra safety in case the VBT is bonkers. 6825 */ 6826 if (INTEL_GEN(dev_priv) < 5) 6827 return false; 6828 6829 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 6830 return true; 6831 6832 return intel_bios_is_port_edp(dev_priv, port); 6833 } 6834 6835 static void 6836 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 6837 { 6838 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6839 enum port port = dp_to_dig_port(intel_dp)->base.port; 6840 6841 if (!IS_G4X(dev_priv) && port != PORT_A) 6842 intel_attach_force_audio_property(connector); 6843 6844 intel_attach_broadcast_rgb_property(connector); 6845 if (HAS_GMCH(dev_priv)) 6846 drm_connector_attach_max_bpc_property(connector, 6, 10); 6847 else if (INTEL_GEN(dev_priv) >= 5) 6848 drm_connector_attach_max_bpc_property(connector, 6, 12); 6849 6850 intel_attach_colorspace_property(connector); 6851 6852 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 6853 drm_object_attach_property(&connector->base, 6854 connector->dev->mode_config.hdr_output_metadata_property, 6855 0); 6856 6857 if (intel_dp_is_edp(intel_dp)) { 6858 u32 allowed_scalers; 6859 6860 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 6861 if (!HAS_GMCH(dev_priv)) 6862 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 6863 6864 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 6865 6866 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 6867 6868 } 6869 } 6870 6871 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 6872 { 6873 intel_dp->panel_power_off_time = ktime_get_boottime(); 6874 intel_dp->last_power_on = jiffies; 6875 intel_dp->last_backlight_off = jiffies; 6876 } 6877 6878 static void 6879 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 6880 { 6881 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6882 u32 pp_on, pp_off, pp_ctl; 6883 struct pps_registers regs; 6884 6885 intel_pps_get_registers(intel_dp, ®s); 6886 6887 pp_ctl = ilk_get_pp_control(intel_dp); 6888 6889 /* Ensure PPS is unlocked */ 6890 if (!HAS_DDI(dev_priv)) 6891 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 
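/*
 * regs.pp_on packs the panel power-up (T1_T3) and backlight-on (T8) delays,
 * regs.pp_off the backlight-off (T9) and power-down (T10) delays; the
 * power-cycle delay (T11_T12) comes from regs.pp_div where that register
 * exists, otherwise from the BXT-style field in regs.pp_ctrl.
 */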
6892 6893 pp_on = intel_de_read(dev_priv, regs.pp_on); 6894 pp_off = intel_de_read(dev_priv, regs.pp_off); 6895 6896 /* Pull timing values out of registers */ 6897 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 6898 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 6899 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 6900 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 6901 6902 if (i915_mmio_reg_valid(regs.pp_div)) { 6903 u32 pp_div; 6904 6905 pp_div = intel_de_read(dev_priv, regs.pp_div); 6906 6907 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 6908 } else { 6909 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 6910 } 6911 } 6912 6913 static void 6914 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 6915 { 6916 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 6917 state_name, 6918 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 6919 } 6920 6921 static void 6922 intel_pps_verify_state(struct intel_dp *intel_dp) 6923 { 6924 struct edp_power_seq hw; 6925 struct edp_power_seq *sw = &intel_dp->pps_delays; 6926 6927 intel_pps_readout_hw_state(intel_dp, &hw); 6928 6929 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 6930 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 6931 DRM_ERROR("PPS state mismatch\n"); 6932 intel_pps_dump_state("sw", sw); 6933 intel_pps_dump_state("hw", &hw); 6934 } 6935 } 6936 6937 static void 6938 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 6939 { 6940 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6941 struct edp_power_seq cur, vbt, spec, 6942 *final = &intel_dp->pps_delays; 6943 6944 lockdep_assert_held(&dev_priv->pps_mutex); 6945 6946 /* already initialized? */ 6947 if (final->t11_t12 != 0) 6948 return; 6949 6950 intel_pps_readout_hw_state(intel_dp, &cur); 6951 6952 intel_pps_dump_state("cur", &cur); 6953 6954 vbt = dev_priv->vbt.edp.pps; 6955 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay 6956 * of 500ms appears to be too short. Ocassionally the panel 6957 * just fails to power back on. Increasing the delay to 800ms 6958 * seems sufficient to avoid this problem. 6959 */ 6960 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 6961 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 6962 drm_dbg_kms(&dev_priv->drm, 6963 "Increasing T12 panel delay as per the quirk to %d\n", 6964 vbt.t11_t12); 6965 } 6966 /* T11_T12 delay is special and actually in units of 100ms, but zero 6967 * based in the hw (so we need to add 100 ms). But the sw vbt 6968 * table multiplies it with 1000 to make it in units of 100usec, 6969 * too. */ 6970 vbt.t11_t12 += 100 * 10; 6971 6972 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 6973 * our hw here, which are all in 100usec. */ 6974 spec.t1_t3 = 210 * 10; 6975 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 6976 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 6977 spec.t10 = 500 * 10; 6978 /* This one is special and actually in units of 100ms, but zero 6979 * based in the hw (so we need to add 100 ms). But the sw vbt 6980 * table multiplies it with 1000 to make it in units of 100usec, 6981 * too. */ 6982 spec.t11_t12 = (510 + 100) * 10; 6983 6984 intel_pps_dump_state("vbt", &vbt); 6985 6986 /* Use the max of the register settings and vbt. If both are 6987 * unset, fall back to the spec limits. */ 6988 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? 
\ 6989 spec.field : \ 6990 max(cur.field, vbt.field)) 6991 assign_final(t1_t3); 6992 assign_final(t8); 6993 assign_final(t9); 6994 assign_final(t10); 6995 assign_final(t11_t12); 6996 #undef assign_final 6997 6998 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 6999 intel_dp->panel_power_up_delay = get_delay(t1_t3); 7000 intel_dp->backlight_on_delay = get_delay(t8); 7001 intel_dp->backlight_off_delay = get_delay(t9); 7002 intel_dp->panel_power_down_delay = get_delay(t10); 7003 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 7004 #undef get_delay 7005 7006 drm_dbg_kms(&dev_priv->drm, 7007 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 7008 intel_dp->panel_power_up_delay, 7009 intel_dp->panel_power_down_delay, 7010 intel_dp->panel_power_cycle_delay); 7011 7012 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", 7013 intel_dp->backlight_on_delay, 7014 intel_dp->backlight_off_delay); 7015 7016 /* 7017 * We override the HW backlight delays to 1 because we do manual waits 7018 * on them. For T8, even BSpec recommends doing it. For T9, if we 7019 * don't do this, we'll end up waiting for the backlight off delay 7020 * twice: once when we do the manual sleep, and once when we disable 7021 * the panel and wait for the PP_STATUS bit to become zero. 7022 */ 7023 final->t8 = 1; 7024 final->t9 = 1; 7025 7026 /* 7027 * HW has only a 100msec granularity for t11_t12 so round it up 7028 * accordingly. 7029 */ 7030 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 7031 } 7032 7033 static void 7034 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 7035 bool force_disable_vdd) 7036 { 7037 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7038 u32 pp_on, pp_off, port_sel = 0; 7039 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; 7040 struct pps_registers regs; 7041 enum port port = dp_to_dig_port(intel_dp)->base.port; 7042 const struct edp_power_seq *seq = &intel_dp->pps_delays; 7043 7044 lockdep_assert_held(&dev_priv->pps_mutex); 7045 7046 intel_pps_get_registers(intel_dp, ®s); 7047 7048 /* 7049 * On some VLV machines the BIOS can leave the VDD 7050 * enabled even on power sequencers which aren't 7051 * hooked up to any port. This would mess up the 7052 * power domain tracking the first time we pick 7053 * one of these power sequencers for use since 7054 * edp_panel_vdd_on() would notice that the VDD was 7055 * already on and therefore wouldn't grab the power 7056 * domain reference. Disable VDD first to avoid this. 7057 * This also avoids spuriously turning the VDD on as 7058 * soon as the new power sequencer gets initialized. 7059 */ 7060 if (force_disable_vdd) { 7061 u32 pp = ilk_get_pp_control(intel_dp); 7062 7063 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, 7064 "Panel power already on\n"); 7065 7066 if (pp & EDP_FORCE_VDD) 7067 drm_dbg_kms(&dev_priv->drm, 7068 "VDD already on, disabling first\n"); 7069 7070 pp &= ~EDP_FORCE_VDD; 7071 7072 intel_de_write(dev_priv, regs.pp_ctrl, pp); 7073 } 7074 7075 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 7076 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 7077 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 7078 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 7079 7080 /* Haswell doesn't have any port selection bits for the panel 7081 * power sequencer any more. 
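 * On VLV/CHV the port is encoded with PANEL_PORT_SELECT_VLV(), and IBX/CPT
 * use the fixed DPA/DPC/DPD selects handled below; newer platforms leave
 * port_sel at zero.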
*/ 7082 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7083 port_sel = PANEL_PORT_SELECT_VLV(port); 7084 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7085 switch (port) { 7086 case PORT_A: 7087 port_sel = PANEL_PORT_SELECT_DPA; 7088 break; 7089 case PORT_C: 7090 port_sel = PANEL_PORT_SELECT_DPC; 7091 break; 7092 case PORT_D: 7093 port_sel = PANEL_PORT_SELECT_DPD; 7094 break; 7095 default: 7096 MISSING_CASE(port); 7097 break; 7098 } 7099 } 7100 7101 pp_on |= port_sel; 7102 7103 intel_de_write(dev_priv, regs.pp_on, pp_on); 7104 intel_de_write(dev_priv, regs.pp_off, pp_off); 7105 7106 /* 7107 * Compute the divisor for the pp clock, simply match the Bspec formula. 7108 */ 7109 if (i915_mmio_reg_valid(regs.pp_div)) { 7110 intel_de_write(dev_priv, regs.pp_div, 7111 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 7112 } else { 7113 u32 pp_ctl; 7114 7115 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 7116 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 7117 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 7118 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7119 } 7120 7121 drm_dbg_kms(&dev_priv->drm, 7122 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 7123 intel_de_read(dev_priv, regs.pp_on), 7124 intel_de_read(dev_priv, regs.pp_off), 7125 i915_mmio_reg_valid(regs.pp_div) ? 7126 intel_de_read(dev_priv, regs.pp_div) : 7127 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 7128 } 7129 7130 static void intel_dp_pps_init(struct intel_dp *intel_dp) 7131 { 7132 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7133 7134 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7135 vlv_initial_power_sequencer_setup(intel_dp); 7136 } else { 7137 intel_dp_init_panel_power_sequencer(intel_dp); 7138 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 7139 } 7140 } 7141 7142 /** 7143 * intel_dp_set_drrs_state - program registers for RR switch to take effect 7144 * @dev_priv: i915 device 7145 * @crtc_state: a pointer to the active intel_crtc_state 7146 * @refresh_rate: RR to be programmed 7147 * 7148 * This function gets called when refresh rate (RR) has to be changed from 7149 * one frequency to another. Switches can be between high and low RR 7150 * supported by the panel or to any other RR based on media playback (in 7151 * this case, RR value needs to be passed from user space). 7152 * 7153 * The caller of this function needs to take a lock on dev_priv->drrs. 
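 *
 * On gen8+ (except CHV) the switch is performed by selecting the M1/N1 or
 * M2/N2 link values for the pipe, on older platforms by toggling the PIPECONF
 * EDP_RR_MODE_SWITCH bit, as implemented below.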
7154 */ 7155 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 7156 const struct intel_crtc_state *crtc_state, 7157 int refresh_rate) 7158 { 7159 struct intel_dp *intel_dp = dev_priv->drrs.dp; 7160 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 7161 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7162 7163 if (refresh_rate <= 0) { 7164 drm_dbg_kms(&dev_priv->drm, 7165 "Refresh rate should be positive non-zero.\n"); 7166 return; 7167 } 7168 7169 if (intel_dp == NULL) { 7170 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 7171 return; 7172 } 7173 7174 if (!intel_crtc) { 7175 drm_dbg_kms(&dev_priv->drm, 7176 "DRRS: intel_crtc not initialized\n"); 7177 return; 7178 } 7179 7180 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7181 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 7182 return; 7183 } 7184 7185 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == 7186 refresh_rate) 7187 index = DRRS_LOW_RR; 7188 7189 if (index == dev_priv->drrs.refresh_rate_type) { 7190 drm_dbg_kms(&dev_priv->drm, 7191 "DRRS requested for previously set RR...ignoring\n"); 7192 return; 7193 } 7194 7195 if (!crtc_state->hw.active) { 7196 drm_dbg_kms(&dev_priv->drm, 7197 "eDP encoder disabled. CRTC not Active\n"); 7198 return; 7199 } 7200 7201 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7202 switch (index) { 7203 case DRRS_HIGH_RR: 7204 intel_dp_set_m_n(crtc_state, M1_N1); 7205 break; 7206 case DRRS_LOW_RR: 7207 intel_dp_set_m_n(crtc_state, M2_N2); 7208 break; 7209 case DRRS_MAX_RR: 7210 default: 7211 drm_err(&dev_priv->drm, 7212 "Unsupported refreshrate type\n"); 7213 } 7214 } else if (INTEL_GEN(dev_priv) > 6) { 7215 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7216 u32 val; 7217 7218 val = intel_de_read(dev_priv, reg); 7219 if (index > DRRS_HIGH_RR) { 7220 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7221 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7222 else 7223 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7224 } else { 7225 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7226 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7227 else 7228 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7229 } 7230 intel_de_write(dev_priv, reg, val); 7231 } 7232 7233 dev_priv->drrs.refresh_rate_type = index; 7234 7235 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", 7236 refresh_rate); 7237 } 7238 7239 /** 7240 * intel_edp_drrs_enable - init drrs struct if supported 7241 * @intel_dp: DP struct 7242 * @crtc_state: A pointer to the active crtc state. 7243 * 7244 * Initializes frontbuffer_bits and drrs.dp 7245 */ 7246 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 7247 const struct intel_crtc_state *crtc_state) 7248 { 7249 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7250 7251 if (!crtc_state->has_drrs) { 7252 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); 7253 return; 7254 } 7255 7256 if (dev_priv->psr.enabled) { 7257 drm_dbg_kms(&dev_priv->drm, 7258 "PSR enabled. Not enabling DRRS.\n"); 7259 return; 7260 } 7261 7262 mutex_lock(&dev_priv->drrs.mutex); 7263 if (dev_priv->drrs.dp) { 7264 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); 7265 goto unlock; 7266 } 7267 7268 dev_priv->drrs.busy_frontbuffer_bits = 0; 7269 7270 dev_priv->drrs.dp = intel_dp; 7271 7272 unlock: 7273 mutex_unlock(&dev_priv->drrs.mutex); 7274 } 7275 7276 /** 7277 * intel_edp_drrs_disable - Disable DRRS 7278 * @intel_dp: DP struct 7279 * @old_crtc_state: Pointer to old crtc_state. 
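 *
 * If the panel is still running at the downclocked rate, this switches it
 * back to the fixed mode refresh rate before clearing drrs.dp and cancelling
 * the idle work.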
7280 * 7281 */ 7282 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 7283 const struct intel_crtc_state *old_crtc_state) 7284 { 7285 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7286 7287 if (!old_crtc_state->has_drrs) 7288 return; 7289 7290 mutex_lock(&dev_priv->drrs.mutex); 7291 if (!dev_priv->drrs.dp) { 7292 mutex_unlock(&dev_priv->drrs.mutex); 7293 return; 7294 } 7295 7296 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7297 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 7298 intel_dp->attached_connector->panel.fixed_mode->vrefresh); 7299 7300 dev_priv->drrs.dp = NULL; 7301 mutex_unlock(&dev_priv->drrs.mutex); 7302 7303 cancel_delayed_work_sync(&dev_priv->drrs.work); 7304 } 7305 7306 static void intel_edp_drrs_downclock_work(struct work_struct *work) 7307 { 7308 struct drm_i915_private *dev_priv = 7309 container_of(work, typeof(*dev_priv), drrs.work.work); 7310 struct intel_dp *intel_dp; 7311 7312 mutex_lock(&dev_priv->drrs.mutex); 7313 7314 intel_dp = dev_priv->drrs.dp; 7315 7316 if (!intel_dp) 7317 goto unlock; 7318 7319 /* 7320 * The delayed work can race with an invalidate hence we need to 7321 * recheck. 7322 */ 7323 7324 if (dev_priv->drrs.busy_frontbuffer_bits) 7325 goto unlock; 7326 7327 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 7328 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7329 7330 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7331 intel_dp->attached_connector->panel.downclock_mode->vrefresh); 7332 } 7333 7334 unlock: 7335 mutex_unlock(&dev_priv->drrs.mutex); 7336 } 7337 7338 /** 7339 * intel_edp_drrs_invalidate - Disable Idleness DRRS 7340 * @dev_priv: i915 device 7341 * @frontbuffer_bits: frontbuffer plane tracking bits 7342 * 7343 * This function gets called everytime rendering on the given planes start. 7344 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). 7345 * 7346 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7347 */ 7348 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 7349 unsigned int frontbuffer_bits) 7350 { 7351 struct drm_crtc *crtc; 7352 enum pipe pipe; 7353 7354 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7355 return; 7356 7357 cancel_delayed_work(&dev_priv->drrs.work); 7358 7359 mutex_lock(&dev_priv->drrs.mutex); 7360 if (!dev_priv->drrs.dp) { 7361 mutex_unlock(&dev_priv->drrs.mutex); 7362 return; 7363 } 7364 7365 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7366 pipe = to_intel_crtc(crtc)->pipe; 7367 7368 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7369 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 7370 7371 /* invalidate means busy screen hence upclock */ 7372 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7373 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7374 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7375 7376 mutex_unlock(&dev_priv->drrs.mutex); 7377 } 7378 7379 /** 7380 * intel_edp_drrs_flush - Restart Idleness DRRS 7381 * @dev_priv: i915 device 7382 * @frontbuffer_bits: frontbuffer plane tracking bits 7383 * 7384 * This function gets called every time rendering on the given planes has 7385 * completed or flip on a crtc is completed. So DRRS should be upclocked 7386 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, 7387 * if no other planes are dirty. 7388 * 7389 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 
7390 */ 7391 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 7392 unsigned int frontbuffer_bits) 7393 { 7394 struct drm_crtc *crtc; 7395 enum pipe pipe; 7396 7397 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7398 return; 7399 7400 cancel_delayed_work(&dev_priv->drrs.work); 7401 7402 mutex_lock(&dev_priv->drrs.mutex); 7403 if (!dev_priv->drrs.dp) { 7404 mutex_unlock(&dev_priv->drrs.mutex); 7405 return; 7406 } 7407 7408 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7409 pipe = to_intel_crtc(crtc)->pipe; 7410 7411 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7412 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 7413 7414 /* flush means busy screen hence upclock */ 7415 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7416 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7417 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7418 7419 /* 7420 * flush also means no more activity hence schedule downclock, if all 7421 * other fbs are quiescent too 7422 */ 7423 if (!dev_priv->drrs.busy_frontbuffer_bits) 7424 schedule_delayed_work(&dev_priv->drrs.work, 7425 msecs_to_jiffies(1000)); 7426 mutex_unlock(&dev_priv->drrs.mutex); 7427 } 7428 7429 /** 7430 * DOC: Display Refresh Rate Switching (DRRS) 7431 * 7432 * Display Refresh Rate Switching (DRRS) is a power conservation feature 7433 * which enables switching between low and high refresh rates 7434 * dynamically, based on the usage scenario. This feature is applicable 7435 * for internal panels. 7436 * 7437 * Indication that the panel supports DRRS is given by the panel EDID, which 7438 * would list multiple refresh rates for one resolution. 7439 * 7440 * DRRS is of 2 types - static and seamless. 7441 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 7442 * (may appear as a blink on screen) and is used in the dock-undock scenario. 7443 * Seamless DRRS involves changing RR without any visual effect to the user 7444 * and can be used during normal system usage. This is done by programming 7445 * certain registers. 7446 * 7447 * Support for static/seamless DRRS may be indicated in the VBT based on 7448 * inputs from the panel spec. 7449 * 7450 * DRRS saves power by switching to low RR based on usage scenarios. 7451 * 7452 * The implementation is based on frontbuffer tracking. When 7453 * there is a disturbance on the screen triggered by user activity or a periodic 7454 * system activity, DRRS is disabled (RR is changed to high RR). When there is 7455 * no movement on screen, after a timeout of 1 second, a switch to low RR is 7456 * made. 7457 * 7458 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 7459 * and intel_edp_drrs_flush() are called. 7460 * 7461 * DRRS can be further extended to support other internal panels and also 7462 * the scenario of video playback wherein RR is set based on the rate 7463 * requested by userspace. 7464 */ 7465 7466 /** 7467 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7468 * @connector: eDP connector 7469 * @fixed_mode: preferred mode of panel 7470 * 7471 * This function is called only once at driver load to initialize basic 7472 * DRRS stuff. 7473 * 7474 * Returns: 7475 * Downclock mode if panel supports it, else return NULL. 7476 * DRRS support is determined by the presence of downclock mode (apart 7477 * from VBT setting).
7466 /** 7467 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7468 * @connector: eDP connector 7469 * @fixed_mode: preferred mode of panel 7470 * 7471 * This function is called only once at driver load to initialize basic 7472 * DRRS state. 7473 * 7474 * Returns: 7475 * Downclock mode if the panel supports it, else NULL. 7476 * DRRS support is determined by the presence of a downclock mode (apart 7477 * from the VBT setting). 7478 */ 7479 static struct drm_display_mode * 7480 intel_dp_drrs_init(struct intel_connector *connector, 7481 struct drm_display_mode *fixed_mode) 7482 { 7483 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 7484 struct drm_display_mode *downclock_mode = NULL; 7485 7486 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 7487 mutex_init(&dev_priv->drrs.mutex); 7488 7489 if (INTEL_GEN(dev_priv) <= 6) { 7490 drm_dbg_kms(&dev_priv->drm, 7491 "DRRS supported for Gen7 and above\n"); 7492 return NULL; 7493 } 7494 7495 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 7496 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); 7497 return NULL; 7498 } 7499 7500 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); 7501 if (!downclock_mode) { 7502 drm_dbg_kms(&dev_priv->drm, 7503 "Downclock mode not found. DRRS not supported\n"); 7504 return NULL; 7505 } 7506 7507 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 7508 7509 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 7510 drm_dbg_kms(&dev_priv->drm, 7511 "seamless DRRS supported for eDP panel.\n"); 7512 return downclock_mode; 7513 } 7514
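/*
 * Editor's note: a simplified, illustrative sketch (not driver code) of the
 * kind of selection intel_panel_edid_downclock_mode() is relied on for above:
 * finding a second EDID mode with the same active size as the fixed mode but
 * a lower refresh rate. The exact matching criteria shown here are an
 * assumption for illustration.
 */
#if 0	/* illustrative sketch only */
static const struct drm_display_mode *
example_find_downclock_mode(struct intel_connector *connector,
			    const struct drm_display_mode *fixed_mode)
{
	const struct drm_display_mode *mode, *best = NULL;

	list_for_each_entry(mode, &connector->base.probed_modes, head) {
		/* Only modes with the same active area can be switched seamlessly. */
		if (mode->hdisplay != fixed_mode->hdisplay ||
		    mode->vdisplay != fixed_mode->vdisplay)
			continue;

		/* Keep the lowest refresh rate below the preferred mode. */
		if (mode->vrefresh < fixed_mode->vrefresh &&
		    (!best || mode->vrefresh < best->vrefresh))
			best = mode;
	}

	return best;
}
#endif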
7515 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 7516 struct intel_connector *intel_connector) 7517 { 7518 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7519 struct drm_device *dev = &dev_priv->drm; 7520 struct drm_connector *connector = &intel_connector->base; 7521 struct drm_display_mode *fixed_mode = NULL; 7522 struct drm_display_mode *downclock_mode = NULL; 7523 bool has_dpcd; 7524 enum pipe pipe = INVALID_PIPE; 7525 intel_wakeref_t wakeref; 7526 struct edid *edid; 7527 7528 if (!intel_dp_is_edp(intel_dp)) 7529 return true; 7530 7531 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); 7532 7533 /* 7534 * On IBX/CPT we may get here with LVDS already registered. Since the 7535 * driver uses the only internal power sequencer available for both 7536 * eDP and LVDS, bail out early in this case to prevent interfering 7537 * with an already powered-on LVDS power sequencer. 7538 */ 7539 if (intel_get_lvds_encoder(dev_priv)) { 7540 drm_WARN_ON(dev, 7541 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 7542 drm_info(&dev_priv->drm, 7543 "LVDS was detected, not registering eDP\n"); 7544 7545 return false; 7546 } 7547 7548 with_pps_lock(intel_dp, wakeref) { 7549 intel_dp_init_panel_power_timestamps(intel_dp); 7550 intel_dp_pps_init(intel_dp); 7551 intel_edp_panel_vdd_sanitize(intel_dp); 7552 } 7553 7554 /* Cache DPCD and EDID for eDP. */ 7555 has_dpcd = intel_edp_init_dpcd(intel_dp); 7556 7557 if (!has_dpcd) { 7558 /* if this fails, presume the device is a ghost */ 7559 drm_info(&dev_priv->drm, 7560 "failed to retrieve link info, disabling eDP\n"); 7561 goto out_vdd_off; 7562 } 7563 7564 mutex_lock(&dev->mode_config.mutex); 7565 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 7566 if (edid) { 7567 if (drm_add_edid_modes(connector, edid)) { 7568 drm_connector_update_edid_property(connector, edid); 7569 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 7570 } else { 7571 kfree(edid); 7572 edid = ERR_PTR(-EINVAL); 7573 } 7574 } else { 7575 edid = ERR_PTR(-ENOENT); 7576 } 7577 intel_connector->edid = edid; 7578 7579 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 7580 if (fixed_mode) 7581 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 7582 7583 /* fallback to VBT if available for eDP */ 7584 if (!fixed_mode) 7585 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 7586 mutex_unlock(&dev->mode_config.mutex); 7587 7588 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7589 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 7590 register_reboot_notifier(&intel_dp->edp_notifier); 7591 7592 /* 7593 * Figure out the current pipe for the initial backlight setup. 7594 * If the current pipe isn't valid, try the PPS pipe, and if that 7595 * fails just assume pipe A. 7596 */ 7597 pipe = vlv_active_pipe(intel_dp); 7598 7599 if (pipe != PIPE_A && pipe != PIPE_B) 7600 pipe = intel_dp->pps_pipe; 7601 7602 if (pipe != PIPE_A && pipe != PIPE_B) 7603 pipe = PIPE_A; 7604 7605 drm_dbg_kms(&dev_priv->drm, 7606 "using pipe %c for initial backlight setup\n", 7607 pipe_name(pipe)); 7608 } 7609 7610 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 7611 intel_connector->panel.backlight.power = intel_edp_backlight_power; 7612 intel_panel_setup_backlight(connector, pipe); 7613 7614 if (fixed_mode) { 7615 drm_connector_set_panel_orientation_with_quirk(connector, 7616 dev_priv->vbt.orientation, 7617 fixed_mode->hdisplay, fixed_mode->vdisplay); 7618 } 7619 7620 return true; 7621 7622 out_vdd_off: 7623 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 7624 /* 7625 * vdd might still be enabled due to the delayed vdd off. 7626 * Make sure vdd is actually turned off here. 7627 */ 7628 with_pps_lock(intel_dp, wakeref) 7629 edp_panel_vdd_off_sync(intel_dp); 7630 7631 return false; 7632 } 7633 7634 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 7635 { 7636 struct intel_connector *intel_connector; 7637 struct drm_connector *connector; 7638 7639 intel_connector = container_of(work, typeof(*intel_connector), 7640 modeset_retry_work); 7641 connector = &intel_connector->base; 7642 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 7643 connector->name); 7644 7645 /* Grab the locks before changing connector property */ 7646 mutex_lock(&connector->dev->mode_config.mutex); 7647 /* Set connector link status to BAD and send a Uevent to notify 7648 * userspace to do a modeset.
7649 */ 7650 drm_connector_set_link_status_property(connector, 7651 DRM_MODE_LINK_STATUS_BAD); 7652 mutex_unlock(&connector->dev->mode_config.mutex); 7653 /* Send Hotplug uevent so userspace can reprobe */ 7654 drm_kms_helper_hotplug_event(connector->dev); 7655 } 7656 7657 bool 7658 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 7659 struct intel_connector *intel_connector) 7660 { 7661 struct drm_connector *connector = &intel_connector->base; 7662 struct intel_dp *intel_dp = &intel_dig_port->dp; 7663 struct intel_encoder *intel_encoder = &intel_dig_port->base; 7664 struct drm_device *dev = intel_encoder->base.dev; 7665 struct drm_i915_private *dev_priv = to_i915(dev); 7666 enum port port = intel_encoder->port; 7667 enum phy phy = intel_port_to_phy(dev_priv, port); 7668 int type; 7669 7670 /* Initialize the work for modeset in case of link train failure */ 7671 INIT_WORK(&intel_connector->modeset_retry_work, 7672 intel_dp_modeset_retry_work_fn); 7673 7674 if (drm_WARN(dev, intel_dig_port->max_lanes < 1, 7675 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 7676 intel_dig_port->max_lanes, intel_encoder->base.base.id, 7677 intel_encoder->base.name)) 7678 return false; 7679 7680 intel_dp_set_source_rates(intel_dp); 7681 7682 intel_dp->reset_link_params = true; 7683 intel_dp->pps_pipe = INVALID_PIPE; 7684 intel_dp->active_pipe = INVALID_PIPE; 7685 7686 /* Preserve the current hw state. */ 7687 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 7688 intel_dp->attached_connector = intel_connector; 7689 7690 if (intel_dp_is_port_edp(dev_priv, port)) { 7691 /* 7692 * Currently we don't support eDP on TypeC ports, although in 7693 * theory it could work on TypeC legacy ports. 7694 */ 7695 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 7696 type = DRM_MODE_CONNECTOR_eDP; 7697 } else { 7698 type = DRM_MODE_CONNECTOR_DisplayPort; 7699 } 7700 7701 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7702 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 7703 7704 /* 7705 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 7706 * for DP the encoder type can be set by the caller to 7707 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 7708 */ 7709 if (type == DRM_MODE_CONNECTOR_eDP) 7710 intel_encoder->type = INTEL_OUTPUT_EDP; 7711 7712 /* eDP only on port B and/or C on vlv/chv */ 7713 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 7714 IS_CHERRYVIEW(dev_priv)) && 7715 intel_dp_is_edp(intel_dp) && 7716 port != PORT_B && port != PORT_C)) 7717 return false; 7718 7719 drm_dbg_kms(&dev_priv->drm, 7720 "Adding %s connector on [ENCODER:%d:%s]\n", 7721 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 7722 intel_encoder->base.base.id, intel_encoder->base.name); 7723 7724 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 7725 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 7726 7727 if (!HAS_GMCH(dev_priv)) 7728 connector->interlace_allowed = true; 7729 connector->doublescan_allowed = 0; 7730 7731 if (INTEL_GEN(dev_priv) >= 11) 7732 connector->ycbcr_420_allowed = true; 7733 7734 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 7735 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 7736 7737 intel_dp_aux_init(intel_dp); 7738 7739 intel_connector_attach_encoder(intel_connector, intel_encoder); 7740 7741 if (HAS_DDI(dev_priv)) 7742 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 7743 else 7744 intel_connector->get_hw_state = intel_connector_get_hw_state; 7745 7746 /* init MST on ports that can support it */ 7747 intel_dp_mst_encoder_init(intel_dig_port, 7748 intel_connector->base.base.id); 7749 7750 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 7751 intel_dp_aux_fini(intel_dp); 7752 intel_dp_mst_encoder_cleanup(intel_dig_port); 7753 goto fail; 7754 } 7755 7756 intel_dp_add_properties(intel_dp, connector); 7757 7758 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 7759 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); 7760 if (ret) 7761 drm_dbg_kms(&dev_priv->drm, 7762 "HDCP init failed, skipping.\n"); 7763 } 7764 7765 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 7766 * 0xd. Failure to do so will result in spurious interrupts being 7767 * generated on the port when a cable is not attached. 7768 */ 7769 if (IS_G45(dev_priv)) { 7770 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 7771 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 7772 (temp & ~0xf) | 0xd); 7773 } 7774 7775 return true; 7776 7777 fail: 7778 drm_connector_cleanup(connector); 7779 7780 return false; 7781 } 7782 7783 bool intel_dp_init(struct drm_i915_private *dev_priv, 7784 i915_reg_t output_reg, 7785 enum port port) 7786 { 7787 struct intel_digital_port *intel_dig_port; 7788 struct intel_encoder *intel_encoder; 7789 struct drm_encoder *encoder; 7790 struct intel_connector *intel_connector; 7791 7792 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 7793 if (!intel_dig_port) 7794 return false; 7795 7796 intel_connector = intel_connector_alloc(); 7797 if (!intel_connector) 7798 goto err_connector_alloc; 7799 7800 intel_encoder = &intel_dig_port->base; 7801 encoder = &intel_encoder->base; 7802 7803 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 7804 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 7805 "DP %c", port_name(port))) 7806 goto err_encoder_init; 7807 7808 intel_encoder->hotplug = intel_dp_hotplug; 7809 intel_encoder->compute_config = intel_dp_compute_config; 7810 intel_encoder->get_hw_state = intel_dp_get_hw_state; 7811 intel_encoder->get_config = intel_dp_get_config; 7812 intel_encoder->update_pipe = intel_panel_update_backlight; 7813 intel_encoder->suspend = intel_dp_encoder_suspend; 7814 if (IS_CHERRYVIEW(dev_priv)) { 7815 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 7816 intel_encoder->pre_enable = chv_pre_enable_dp; 7817 intel_encoder->enable = vlv_enable_dp; 7818 intel_encoder->disable = vlv_disable_dp; 7819 intel_encoder->post_disable = chv_post_disable_dp; 7820 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 7821 } else if (IS_VALLEYVIEW(dev_priv)) { 7822 intel_encoder->pre_pll_enable = 
vlv_dp_pre_pll_enable; 7823 intel_encoder->pre_enable = vlv_pre_enable_dp; 7824 intel_encoder->enable = vlv_enable_dp; 7825 intel_encoder->disable = vlv_disable_dp; 7826 intel_encoder->post_disable = vlv_post_disable_dp; 7827 } else { 7828 intel_encoder->pre_enable = g4x_pre_enable_dp; 7829 intel_encoder->enable = g4x_enable_dp; 7830 intel_encoder->disable = g4x_disable_dp; 7831 intel_encoder->post_disable = g4x_post_disable_dp; 7832 } 7833 7834 intel_dig_port->dp.output_reg = output_reg; 7835 intel_dig_port->max_lanes = 4; 7836 intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); 7837 intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); 7838 7839 intel_encoder->type = INTEL_OUTPUT_DP; 7840 intel_encoder->power_domain = intel_port_to_power_domain(port); 7841 if (IS_CHERRYVIEW(dev_priv)) { 7842 if (port == PORT_D) 7843 intel_encoder->pipe_mask = BIT(PIPE_C); 7844 else 7845 intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); 7846 } else { 7847 intel_encoder->pipe_mask = ~0; 7848 } 7849 intel_encoder->cloneable = 0; 7850 intel_encoder->port = port; 7851 7852 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 7853 7854 if (port != PORT_A) 7855 intel_infoframe_init(intel_dig_port); 7856 7857 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 7858 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 7859 goto err_init_connector; 7860 7861 return true; 7862 7863 err_init_connector: 7864 drm_encoder_cleanup(encoder); 7865 err_encoder_init: 7866 kfree(intel_connector); 7867 err_connector_alloc: 7868 kfree(intel_dig_port); 7869 return false; 7870 } 7871 7872 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 7873 { 7874 struct intel_encoder *encoder; 7875 7876 for_each_intel_encoder(&dev_priv->drm, encoder) { 7877 struct intel_dp *intel_dp; 7878 7879 if (encoder->type != INTEL_OUTPUT_DDI) 7880 continue; 7881 7882 intel_dp = enc_to_intel_dp(encoder); 7883 7884 if (!intel_dp->can_mst) 7885 continue; 7886 7887 if (intel_dp->is_mst) 7888 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 7889 } 7890 } 7891 7892 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 7893 { 7894 struct intel_encoder *encoder; 7895 7896 for_each_intel_encoder(&dev_priv->drm, encoder) { 7897 struct intel_dp *intel_dp; 7898 int ret; 7899 7900 if (encoder->type != INTEL_OUTPUT_DDI) 7901 continue; 7902 7903 intel_dp = enc_to_intel_dp(encoder); 7904 7905 if (!intel_dp->can_mst) 7906 continue; 7907 7908 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 7909 true); 7910 if (ret) { 7911 intel_dp->is_mst = false; 7912 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 7913 false); 7914 } 7915 } 7916 } 7917
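/*
 * Editor's note: intel_dp_mst_suspend() and intel_dp_mst_resume() above are
 * intended to be called from the driver's system suspend/resume paths so that
 * MST topology state is quiesced before the hardware goes down and re-synced
 * afterwards. The sketch below is illustrative only; the helper names are
 * assumptions, and the real call sites live in the i915 suspend/resume code.
 */
#if 0	/* illustrative sketch only */
static int example_driver_suspend(struct drm_i915_private *dev_priv)
{
	/* Quiesce MST topology management before powering the display down. */
	intel_dp_mst_suspend(dev_priv);

	/* ... remaining display suspend steps ... */
	return 0;
}

static int example_driver_resume(struct drm_i915_private *dev_priv)
{
	/* ... bring the display back up first ... */

	/*
	 * Re-sync the MST topology; on failure intel_dp_mst_resume() clears
	 * is_mst and disables the topology manager so the port falls back to
	 * the regular SST detection paths.
	 */
	intel_dp_mst_resume(dev_priv);
	return 0;
}
#endif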