/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
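
/*
 * Illustrative sketch (not part of the original source): assuming a sink
 * whose DPCD reports DP_MAX_LINK_RATE = 0x14 (HBR2),
 * drm_dp_bw_code_to_link_rate(0x14) returns 540000 kHz, so
 * intel_dp_set_sink_rates() above would fill sink_rates[] with
 * { 162000, 270000, 540000 } and set num_sink_rates to 3.
 */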

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
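
/*
 * Illustrative sketch (not part of the original source): for a hypothetical
 * 2560x1440@60 mode with a 241500 kHz dot clock at 24 bpp,
 * intel_dp_link_required(241500, 24) = DIV_ROUND_UP(241500 * 24, 8)
 * = 724500 kB/s of required bandwidth, while an HBR2 x4 link offers
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s, so such a mode would fit.
 */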

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}
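
/*
 * Illustrative sketch (not part of the original source): assuming a
 * hypothetical 600 MHz pixel clock (600000 KPixels/s), which is below
 * DP_DSC_PEAK_PIXEL_RATE, intel_dp_dsc_get_slice_count() above starts from
 * min_slice_count = DIV_ROUND_UP(600000, DP_DSC_MAX_ENC_THROUGHPUT_0) = 2
 * and then picks the smallest entry of valid_dsc_slicecount[] that is both
 * >= that value and supported by the sink, i.e. 2 slices per line here.
 */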

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
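
/*
 * Illustrative sketch (not part of the original source): the AUX data
 * registers hold up to 4 payload bytes in big-endian order, so for a
 * hypothetical two-byte payload src = { 0x12, 0x34 },
 * intel_dp_pack_aux(src, 2) returns 0x12340000, and
 * intel_dp_unpack_aux(0x12340000, dst, 2) writes the same two bytes back.
 */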

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					       bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), intel_dig_port->base.base.base.id,
		     intel_dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
835 */ 836 for_each_intel_dp(&dev_priv->drm, encoder) { 837 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 838 839 if (encoder->type == INTEL_OUTPUT_EDP) { 840 drm_WARN_ON(&dev_priv->drm, 841 intel_dp->active_pipe != INVALID_PIPE && 842 intel_dp->active_pipe != 843 intel_dp->pps_pipe); 844 845 if (intel_dp->pps_pipe != INVALID_PIPE) 846 pipes &= ~(1 << intel_dp->pps_pipe); 847 } else { 848 drm_WARN_ON(&dev_priv->drm, 849 intel_dp->pps_pipe != INVALID_PIPE); 850 851 if (intel_dp->active_pipe != INVALID_PIPE) 852 pipes &= ~(1 << intel_dp->active_pipe); 853 } 854 } 855 856 if (pipes == 0) 857 return INVALID_PIPE; 858 859 return ffs(pipes) - 1; 860 } 861 862 static enum pipe 863 vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 864 { 865 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 866 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 867 enum pipe pipe; 868 869 lockdep_assert_held(&dev_priv->pps_mutex); 870 871 /* We should never land here with regular DP ports */ 872 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); 873 874 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE && 875 intel_dp->active_pipe != intel_dp->pps_pipe); 876 877 if (intel_dp->pps_pipe != INVALID_PIPE) 878 return intel_dp->pps_pipe; 879 880 pipe = vlv_find_free_pps(dev_priv); 881 882 /* 883 * Didn't find one. This should not happen since there 884 * are two power sequencers and up to two eDP ports. 885 */ 886 if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE)) 887 pipe = PIPE_A; 888 889 vlv_steal_power_sequencer(dev_priv, pipe); 890 intel_dp->pps_pipe = pipe; 891 892 drm_dbg_kms(&dev_priv->drm, 893 "picked pipe %c power sequencer for [ENCODER:%d:%s]\n", 894 pipe_name(intel_dp->pps_pipe), 895 intel_dig_port->base.base.base.id, 896 intel_dig_port->base.base.name); 897 898 /* init power sequencer on this pipe and port */ 899 intel_dp_init_panel_power_sequencer(intel_dp); 900 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 901 902 /* 903 * Even vdd force doesn't work until we've made 904 * the power sequencer lock in on the port. 905 */ 906 vlv_power_sequencer_kick(intel_dp); 907 908 return intel_dp->pps_pipe; 909 } 910 911 static int 912 bxt_power_sequencer_idx(struct intel_dp *intel_dp) 913 { 914 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 915 int backlight_controller = dev_priv->vbt.backlight.controller; 916 917 lockdep_assert_held(&dev_priv->pps_mutex); 918 919 /* We should never land here with regular DP ports */ 920 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); 921 922 if (!intel_dp->pps_reset) 923 return backlight_controller; 924 925 intel_dp->pps_reset = false; 926 927 /* 928 * Only the HW needs to be reprogrammed, the SW state is fixed and 929 * has been setup during connector init. 
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    intel_dig_port->base.base.base.id,
			    intel_dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
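
/*
 * Illustrative sketch (not part of the original source): the dividers above
 * aim for a roughly 2 MHz AUX bit clock, so on a hypothetical platform whose
 * rawclk_freq is 200000 kHz (200 MHz), g4x_get_aux_clock_divider() would
 * return DIV_ROUND_CLOSEST(200000, 2000) = 100.
 */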
We still implement the 1270 * get_aux_clock_divider vfunc to plug-in into the existing code. 1271 */ 1272 return index ? 0 : 1; 1273 } 1274 1275 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, 1276 int send_bytes, 1277 u32 aux_clock_divider) 1278 { 1279 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1280 struct drm_i915_private *dev_priv = 1281 to_i915(intel_dig_port->base.base.dev); 1282 u32 precharge, timeout; 1283 1284 if (IS_GEN(dev_priv, 6)) 1285 precharge = 3; 1286 else 1287 precharge = 5; 1288 1289 if (IS_BROADWELL(dev_priv)) 1290 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 1291 else 1292 timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 1293 1294 return DP_AUX_CH_CTL_SEND_BUSY | 1295 DP_AUX_CH_CTL_DONE | 1296 DP_AUX_CH_CTL_INTERRUPT | 1297 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1298 timeout | 1299 DP_AUX_CH_CTL_RECEIVE_ERROR | 1300 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1301 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1302 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 1303 } 1304 1305 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, 1306 int send_bytes, 1307 u32 unused) 1308 { 1309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1310 struct drm_i915_private *i915 = 1311 to_i915(intel_dig_port->base.base.dev); 1312 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); 1313 u32 ret; 1314 1315 ret = DP_AUX_CH_CTL_SEND_BUSY | 1316 DP_AUX_CH_CTL_DONE | 1317 DP_AUX_CH_CTL_INTERRUPT | 1318 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1319 DP_AUX_CH_CTL_TIME_OUT_MAX | 1320 DP_AUX_CH_CTL_RECEIVE_ERROR | 1321 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1322 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | 1323 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 1324 1325 if (intel_phy_is_tc(i915, phy) && 1326 intel_dig_port->tc_mode == TC_PORT_TBT_ALT) 1327 ret |= DP_AUX_CH_CTL_TBT_IO; 1328 1329 return ret; 1330 } 1331 1332 static int 1333 intel_dp_aux_xfer(struct intel_dp *intel_dp, 1334 const u8 *send, int send_bytes, 1335 u8 *recv, int recv_size, 1336 u32 aux_send_ctl_flags) 1337 { 1338 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1339 struct drm_i915_private *i915 = 1340 to_i915(intel_dig_port->base.base.dev); 1341 struct intel_uncore *uncore = &i915->uncore; 1342 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); 1343 bool is_tc_port = intel_phy_is_tc(i915, phy); 1344 i915_reg_t ch_ctl, ch_data[5]; 1345 u32 aux_clock_divider; 1346 enum intel_display_power_domain aux_domain = 1347 intel_aux_power_domain(intel_dig_port); 1348 intel_wakeref_t aux_wakeref; 1349 intel_wakeref_t pps_wakeref; 1350 int i, ret, recv_bytes; 1351 int try, clock = 0; 1352 u32 status; 1353 bool vdd; 1354 1355 ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); 1356 for (i = 0; i < ARRAY_SIZE(ch_data); i++) 1357 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); 1358 1359 if (is_tc_port) 1360 intel_tc_port_lock(intel_dig_port); 1361 1362 aux_wakeref = intel_display_power_get(i915, aux_domain); 1363 pps_wakeref = pps_lock(intel_dp); 1364 1365 /* 1366 * We will be called with VDD already enabled for dpcd/edid/oui reads. 1367 * In such cases we want to leave VDD enabled and it's up to upper layers 1368 * to turn it off. But for eg. i2c-dev access we need to turn it on/off 1369 * ourselves. 1370 */ 1371 vdd = edp_panel_vdd_on(intel_dp); 1372 1373 /* dp aux is extremely sensitive to irq latency, hence request the 1374 * lowest possible wakeup latency and so prevent the cpu from going into 1375 * deep sleep states. 
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}


static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (WARN_ON(len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(encoder, crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3 * bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
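
/*
 * Illustrative sketch (not part of the original source): for a hypothetical
 * port_clock of 270000 kHz on a sink that does not use the eDP 1.4 rate
 * select table, intel_dp_compute_rate() above would report
 * link_bw = drm_dp_link_rate_to_bw_code(270000) = 0x0a (DP_LINK_BW_2_7)
 * and rate_select = 0; with use_rate_select set, link_bw would instead be 0
 * and rate_select the index of 270000 in sink_rates[].
 */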
*/ 1952 void 1953 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1954 struct intel_crtc_state *pipe_config, 1955 struct link_config_limits *limits) 1956 { 1957 /* For DP Compliance we override the computed bpp for the pipe */ 1958 if (intel_dp->compliance.test_data.bpc != 0) { 1959 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1960 1961 limits->min_bpp = limits->max_bpp = bpp; 1962 pipe_config->dither_force_disable = bpp == 6 * 3; 1963 1964 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp); 1965 } 1966 1967 /* Use values requested by Compliance Test Request */ 1968 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1969 int index; 1970 1971 /* Validate the compliance test data since max values 1972 * might have changed due to link train fallback. 1973 */ 1974 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1975 intel_dp->compliance.test_lane_count)) { 1976 index = intel_dp_rate_index(intel_dp->common_rates, 1977 intel_dp->num_common_rates, 1978 intel_dp->compliance.test_link_rate); 1979 if (index >= 0) 1980 limits->min_clock = limits->max_clock = index; 1981 limits->min_lane_count = limits->max_lane_count = 1982 intel_dp->compliance.test_lane_count; 1983 } 1984 } 1985 } 1986 1987 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 1988 { 1989 /* 1990 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 1991 * format of the number of bytes per pixel will be half the number 1992 * of bytes of RGB pixel. 1993 */ 1994 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1995 bpp /= 2; 1996 1997 return bpp; 1998 } 1999 2000 /* Optimize link config in order: max bpp, min clock, min lanes */ 2001 static int 2002 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2003 struct intel_crtc_state *pipe_config, 2004 const struct link_config_limits *limits) 2005 { 2006 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2007 int bpp, clock, lane_count; 2008 int mode_rate, link_clock, link_avail; 2009 2010 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2011 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2012 2013 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2014 output_bpp); 2015 2016 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2017 for (lane_count = limits->min_lane_count; 2018 lane_count <= limits->max_lane_count; 2019 lane_count <<= 1) { 2020 link_clock = intel_dp->common_rates[clock]; 2021 link_avail = intel_dp_max_data_rate(link_clock, 2022 lane_count); 2023 2024 if (mode_rate <= link_avail) { 2025 pipe_config->lane_count = lane_count; 2026 pipe_config->pipe_bpp = bpp; 2027 pipe_config->port_clock = link_clock; 2028 2029 return 0; 2030 } 2031 } 2032 } 2033 } 2034 2035 return -EINVAL; 2036 } 2037 2038 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2039 { 2040 int i, num_bpc; 2041 u8 dsc_bpc[3] = {0}; 2042 2043 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2044 dsc_bpc); 2045 for (i = 0; i < num_bpc; i++) { 2046 if (dsc_max_bpc >= dsc_bpc[i]) 2047 return dsc_bpc[i] * 3; 2048 } 2049 2050 return 0; 2051 } 2052 2053 #define DSC_SUPPORTED_VERSION_MIN 1 2054 2055 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2056 struct intel_crtc_state *crtc_state) 2057 { 2058 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2059 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2060 u8 line_buf_depth; 2061 int ret; 2062 2063 
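/*
 * Descriptive note on the flow below: derive the base DSC parameters from
 * the source, pick a slice height, then clamp the DSC version, colour
 * format and line buffer depth to what the sink advertises in its DSC DPCD
 * before computing the final rate control parameters.
 */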
ret = intel_dsc_compute_params(encoder, crtc_state); 2064 if (ret) 2065 return ret; 2066 2067 /* 2068 * Slice Height of 8 works for all currently available panels. So start 2069 * with that if pic_height is an integral multiple of 8. Eventually add 2070 * logic to try multiple slice heights. 2071 */ 2072 if (vdsc_cfg->pic_height % 8 == 0) 2073 vdsc_cfg->slice_height = 8; 2074 else if (vdsc_cfg->pic_height % 4 == 0) 2075 vdsc_cfg->slice_height = 4; 2076 else 2077 vdsc_cfg->slice_height = 2; 2078 2079 vdsc_cfg->dsc_version_major = 2080 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2081 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2082 vdsc_cfg->dsc_version_minor = 2083 min(DSC_SUPPORTED_VERSION_MIN, 2084 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2085 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2086 2087 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2088 DP_DSC_RGB; 2089 2090 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2091 if (!line_buf_depth) { 2092 DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n"); 2093 return -EINVAL; 2094 } 2095 2096 if (vdsc_cfg->dsc_version_minor == 2) 2097 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2098 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2099 else 2100 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2101 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2102 2103 vdsc_cfg->block_pred_enable = 2104 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2105 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2106 2107 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2108 } 2109 2110 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2111 struct intel_crtc_state *pipe_config, 2112 struct drm_connector_state *conn_state, 2113 struct link_config_limits *limits) 2114 { 2115 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2116 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2117 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2118 u8 dsc_max_bpc; 2119 int pipe_bpp; 2120 int ret; 2121 2122 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2123 intel_dp_supports_fec(intel_dp, pipe_config); 2124 2125 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2126 return -EINVAL; 2127 2128 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2129 if (INTEL_GEN(dev_priv) >= 12) 2130 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2131 else 2132 dsc_max_bpc = min_t(u8, 10, 2133 conn_state->max_requested_bpc); 2134 2135 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2136 2137 /* Min Input BPC for ICL+ is 8 */ 2138 if (pipe_bpp < 8 * 3) { 2139 drm_dbg_kms(&dev_priv->drm, 2140 "No DSC support for less than 8bpc\n"); 2141 return -EINVAL; 2142 } 2143 2144 /* 2145 * For now enable DSC for max bpp, max link rate, max lane count. 2146 * Optimize this later for the minimum possible link rate/lane count 2147 * with DSC enabled for the requested mode. 
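 * The assignments below therefore pin pipe_bpp, port_clock and lane_count
 * to the top of the limits computed by the caller.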
2148 */ 2149 pipe_config->pipe_bpp = pipe_bpp; 2150 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2151 pipe_config->lane_count = limits->max_lane_count; 2152 2153 if (intel_dp_is_edp(intel_dp)) { 2154 pipe_config->dsc.compressed_bpp = 2155 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2156 pipe_config->pipe_bpp); 2157 pipe_config->dsc.slice_count = 2158 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2159 true); 2160 } else { 2161 u16 dsc_max_output_bpp; 2162 u8 dsc_dp_slice_count; 2163 2164 dsc_max_output_bpp = 2165 intel_dp_dsc_get_output_bpp(dev_priv, 2166 pipe_config->port_clock, 2167 pipe_config->lane_count, 2168 adjusted_mode->crtc_clock, 2169 adjusted_mode->crtc_hdisplay); 2170 dsc_dp_slice_count = 2171 intel_dp_dsc_get_slice_count(intel_dp, 2172 adjusted_mode->crtc_clock, 2173 adjusted_mode->crtc_hdisplay); 2174 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2175 drm_dbg_kms(&dev_priv->drm, 2176 "Compressed BPP/Slice Count not supported\n"); 2177 return -EINVAL; 2178 } 2179 pipe_config->dsc.compressed_bpp = min_t(u16, 2180 dsc_max_output_bpp >> 4, 2181 pipe_config->pipe_bpp); 2182 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2183 } 2184 /* 2185 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2186 * is greater than the maximum Cdclock and if slice count is even 2187 * then we need to use 2 VDSC instances. 2188 */ 2189 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2190 if (pipe_config->dsc.slice_count > 1) { 2191 pipe_config->dsc.dsc_split = true; 2192 } else { 2193 drm_dbg_kms(&dev_priv->drm, 2194 "Cannot split stream to use 2 VDSC instances\n"); 2195 return -EINVAL; 2196 } 2197 } 2198 2199 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2200 if (ret < 0) { 2201 drm_dbg_kms(&dev_priv->drm, 2202 "Cannot compute valid DSC parameters for Input Bpp = %d " 2203 "Compressed BPP = %d\n", 2204 pipe_config->pipe_bpp, 2205 pipe_config->dsc.compressed_bpp); 2206 return ret; 2207 } 2208 2209 pipe_config->dsc.compression_enable = true; 2210 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2211 "Compressed Bpp = %d Slice Count = %d\n", 2212 pipe_config->pipe_bpp, 2213 pipe_config->dsc.compressed_bpp, 2214 pipe_config->dsc.slice_count); 2215 2216 return 0; 2217 } 2218 2219 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2220 { 2221 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2222 return 6 * 3; 2223 else 2224 return 8 * 3; 2225 } 2226 2227 static int 2228 intel_dp_compute_link_config(struct intel_encoder *encoder, 2229 struct intel_crtc_state *pipe_config, 2230 struct drm_connector_state *conn_state) 2231 { 2232 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2233 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2234 struct link_config_limits limits; 2235 int common_len; 2236 int ret; 2237 2238 common_len = intel_dp_common_len_rate_limit(intel_dp, 2239 intel_dp->max_link_rate); 2240 2241 /* No common link rates between source and sink */ 2242 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2243 2244 limits.min_clock = 0; 2245 limits.max_clock = common_len - 1; 2246 2247 limits.min_lane_count = 1; 2248 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2249 2250 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2251 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2252 2253 if (intel_dp_is_edp(intel_dp)) { 2254 /* 2255 * Use the maximum clock and number of lanes the eDP panel 2256 * advertizes being 
capable of. The panels are generally 2257 * designed to support only a single clock and lane 2258 * configuration, and typically these values correspond to the 2259 * native resolution of the panel. 2260 */ 2261 limits.min_lane_count = limits.max_lane_count; 2262 limits.min_clock = limits.max_clock; 2263 } 2264 2265 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2266 2267 DRM_DEBUG_KMS("DP link computation with max lane count %i " 2268 "max rate %d max bpp %d pixel clock %iKHz\n", 2269 limits.max_lane_count, 2270 intel_dp->common_rates[limits.max_clock], 2271 limits.max_bpp, adjusted_mode->crtc_clock); 2272 2273 /* 2274 * Optimize for slow and wide. This is the place to add alternative 2275 * optimization policy. 2276 */ 2277 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2278 2279 /* enable compression if the mode doesn't fit available BW */ 2280 DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); 2281 if (ret || intel_dp->force_dsc_en) { 2282 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2283 conn_state, &limits); 2284 if (ret < 0) 2285 return ret; 2286 } 2287 2288 if (pipe_config->dsc.compression_enable) { 2289 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2290 pipe_config->lane_count, pipe_config->port_clock, 2291 pipe_config->pipe_bpp, 2292 pipe_config->dsc.compressed_bpp); 2293 2294 DRM_DEBUG_KMS("DP link rate required %i available %i\n", 2295 intel_dp_link_required(adjusted_mode->crtc_clock, 2296 pipe_config->dsc.compressed_bpp), 2297 intel_dp_max_data_rate(pipe_config->port_clock, 2298 pipe_config->lane_count)); 2299 } else { 2300 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", 2301 pipe_config->lane_count, pipe_config->port_clock, 2302 pipe_config->pipe_bpp); 2303 2304 DRM_DEBUG_KMS("DP link rate required %i available %i\n", 2305 intel_dp_link_required(adjusted_mode->crtc_clock, 2306 pipe_config->pipe_bpp), 2307 intel_dp_max_data_rate(pipe_config->port_clock, 2308 pipe_config->lane_count)); 2309 } 2310 return 0; 2311 } 2312 2313 static int 2314 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2315 struct drm_connector *connector, 2316 struct intel_crtc_state *crtc_state) 2317 { 2318 const struct drm_display_info *info = &connector->display_info; 2319 const struct drm_display_mode *adjusted_mode = 2320 &crtc_state->hw.adjusted_mode; 2321 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2322 int ret; 2323 2324 if (!drm_mode_is_420_only(info, adjusted_mode) || 2325 !intel_dp_get_colorimetry_status(intel_dp) || 2326 !connector->ycbcr_420_allowed) 2327 return 0; 2328 2329 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2330 2331 /* YCBCR 420 output conversion needs a scaler */ 2332 ret = skl_update_scaler_crtc(crtc_state); 2333 if (ret) { 2334 DRM_DEBUG_KMS("Scaler allocation for output failed\n"); 2335 return ret; 2336 } 2337 2338 intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN); 2339 2340 return 0; 2341 } 2342 2343 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2344 const struct drm_connector_state *conn_state) 2345 { 2346 const struct intel_digital_connector_state *intel_conn_state = 2347 to_intel_digital_connector_state(conn_state); 2348 const struct drm_display_mode *adjusted_mode = 2349 &crtc_state->hw.adjusted_mode; 2350 2351 /* 2352 * Our YCbCr output is always limited range. 
2353 * crtc_state->limited_color_range only applies to RGB, 2354 * and it must never be set for YCbCr or we risk setting 2355 * some conflicting bits in PIPECONF which will mess up 2356 * the colors on the monitor. 2357 */ 2358 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2359 return false; 2360 2361 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2362 /* 2363 * See: 2364 * CEA-861-E - 5.1 Default Encoding Parameters 2365 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2366 */ 2367 return crtc_state->pipe_bpp != 18 && 2368 drm_default_rgb_quant_range(adjusted_mode) == 2369 HDMI_QUANTIZATION_RANGE_LIMITED; 2370 } else { 2371 return intel_conn_state->broadcast_rgb == 2372 INTEL_BROADCAST_RGB_LIMITED; 2373 } 2374 } 2375 2376 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2377 enum port port) 2378 { 2379 if (IS_G4X(dev_priv)) 2380 return false; 2381 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2382 return false; 2383 2384 return true; 2385 } 2386 2387 int 2388 intel_dp_compute_config(struct intel_encoder *encoder, 2389 struct intel_crtc_state *pipe_config, 2390 struct drm_connector_state *conn_state) 2391 { 2392 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2393 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2394 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2395 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2396 enum port port = encoder->port; 2397 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); 2398 struct intel_connector *intel_connector = intel_dp->attached_connector; 2399 struct intel_digital_connector_state *intel_conn_state = 2400 to_intel_digital_connector_state(conn_state); 2401 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2402 DP_DPCD_QUIRK_CONSTANT_N); 2403 int ret = 0, output_bpp; 2404 2405 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2406 pipe_config->has_pch_encoder = true; 2407 2408 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2409 2410 if (lspcon->active) 2411 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2412 else 2413 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base, 2414 pipe_config); 2415 2416 if (ret) 2417 return ret; 2418 2419 pipe_config->has_drrs = false; 2420 if (!intel_dp_port_has_audio(dev_priv, port)) 2421 pipe_config->has_audio = false; 2422 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2423 pipe_config->has_audio = intel_dp->has_audio; 2424 else 2425 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2426 2427 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2428 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2429 adjusted_mode); 2430 2431 if (INTEL_GEN(dev_priv) >= 9) { 2432 ret = skl_update_scaler_crtc(pipe_config); 2433 if (ret) 2434 return ret; 2435 } 2436 2437 if (HAS_GMCH(dev_priv)) 2438 intel_gmch_panel_fitting(intel_crtc, pipe_config, 2439 conn_state->scaling_mode); 2440 else 2441 intel_pch_panel_fitting(intel_crtc, pipe_config, 2442 conn_state->scaling_mode); 2443 } 2444 2445 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2446 return -EINVAL; 2447 2448 if (HAS_GMCH(dev_priv) && 2449 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2450 return -EINVAL; 2451 2452 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2453 return -EINVAL; 2454 2455 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2456 return -EINVAL; 2457 2458 ret = 
intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2459 if (ret < 0) 2460 return ret; 2461 2462 pipe_config->limited_color_range = 2463 intel_dp_limited_color_range(pipe_config, conn_state); 2464 2465 if (pipe_config->dsc.compression_enable) 2466 output_bpp = pipe_config->dsc.compressed_bpp; 2467 else 2468 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2469 2470 intel_link_compute_m_n(output_bpp, 2471 pipe_config->lane_count, 2472 adjusted_mode->crtc_clock, 2473 pipe_config->port_clock, 2474 &pipe_config->dp_m_n, 2475 constant_n, pipe_config->fec_enable); 2476 2477 if (intel_connector->panel.downclock_mode != NULL && 2478 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2479 pipe_config->has_drrs = true; 2480 intel_link_compute_m_n(output_bpp, 2481 pipe_config->lane_count, 2482 intel_connector->panel.downclock_mode->clock, 2483 pipe_config->port_clock, 2484 &pipe_config->dp_m2_n2, 2485 constant_n, pipe_config->fec_enable); 2486 } 2487 2488 if (!HAS_DDI(dev_priv)) 2489 intel_dp_set_clock(encoder, pipe_config); 2490 2491 intel_psr_compute_config(intel_dp, pipe_config); 2492 2493 return 0; 2494 } 2495 2496 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2497 int link_rate, u8 lane_count, 2498 bool link_mst) 2499 { 2500 intel_dp->link_trained = false; 2501 intel_dp->link_rate = link_rate; 2502 intel_dp->lane_count = lane_count; 2503 intel_dp->link_mst = link_mst; 2504 } 2505 2506 static void intel_dp_prepare(struct intel_encoder *encoder, 2507 const struct intel_crtc_state *pipe_config) 2508 { 2509 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2510 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2511 enum port port = encoder->port; 2512 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2513 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2514 2515 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2516 pipe_config->lane_count, 2517 intel_crtc_has_type(pipe_config, 2518 INTEL_OUTPUT_DP_MST)); 2519 2520 intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); 2521 intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); 2522 2523 /* 2524 * There are four kinds of DP registers: 2525 * 2526 * IBX PCH 2527 * SNB CPU 2528 * IVB CPU 2529 * CPT PCH 2530 * 2531 * IBX PCH and CPU are the same for almost everything, 2532 * except that the CPU DP PLL is configured in this 2533 * register 2534 * 2535 * CPT PCH is quite different, having many bits moved 2536 * to the TRANS_DP_CTL register instead. That 2537 * configuration happens (oddly) in ilk_pch_enable 2538 */ 2539 2540 /* Preserve the BIOS-computed detected bit. This is 2541 * supposed to be read-only. 
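 * We read back only DP_DETECTED here and OR our own configuration on top
 * of it below.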
2542 */ 2543 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2544 2545 /* Handle DP bits in common between all three register formats */ 2546 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2547 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2548 2549 /* Split out the IBX/CPU vs CPT settings */ 2550 2551 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2552 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2553 intel_dp->DP |= DP_SYNC_HS_HIGH; 2554 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2555 intel_dp->DP |= DP_SYNC_VS_HIGH; 2556 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2557 2558 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2559 intel_dp->DP |= DP_ENHANCED_FRAMING; 2560 2561 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2562 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2563 u32 trans_dp; 2564 2565 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2566 2567 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2568 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2569 trans_dp |= TRANS_DP_ENH_FRAMING; 2570 else 2571 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2572 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2573 } else { 2574 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2575 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2576 2577 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2578 intel_dp->DP |= DP_SYNC_HS_HIGH; 2579 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2580 intel_dp->DP |= DP_SYNC_VS_HIGH; 2581 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2582 2583 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2584 intel_dp->DP |= DP_ENHANCED_FRAMING; 2585 2586 if (IS_CHERRYVIEW(dev_priv)) 2587 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2588 else 2589 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2590 } 2591 } 2592 2593 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2594 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2595 2596 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2597 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2598 2599 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2600 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2601 2602 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2603 2604 static void wait_panel_status(struct intel_dp *intel_dp, 2605 u32 mask, 2606 u32 value) 2607 { 2608 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2609 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2610 2611 lockdep_assert_held(&dev_priv->pps_mutex); 2612 2613 intel_pps_verify_state(intel_dp); 2614 2615 pp_stat_reg = _pp_stat_reg(intel_dp); 2616 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2617 2618 drm_dbg_kms(&dev_priv->drm, 2619 "mask %08x value %08x status %08x control %08x\n", 2620 mask, value, 2621 intel_de_read(dev_priv, pp_stat_reg), 2622 intel_de_read(dev_priv, pp_ctrl_reg)); 2623 2624 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2625 mask, value, 5000)) 2626 drm_err(&dev_priv->drm, 2627 "Panel status timeout: status %08x control %08x\n", 2628 intel_de_read(dev_priv, pp_stat_reg), 2629 intel_de_read(dev_priv, pp_ctrl_reg)); 2630 2631 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2632 } 2633 2634 static void wait_panel_on(struct intel_dp *intel_dp) 2635 { 2636 DRM_DEBUG_KMS("Wait for panel power on\n"); 2637 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2638 } 2639 2640 static void wait_panel_off(struct intel_dp *intel_dp) 2641 { 2642 
DRM_DEBUG_KMS("Wait for panel power off time\n"); 2643 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2644 } 2645 2646 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2647 { 2648 ktime_t panel_power_on_time; 2649 s64 panel_power_off_duration; 2650 2651 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 2652 2653 /* take the difference of current time and panel power off time 2654 * and then make the panel wait for t11_t12 if needed. */ 2655 panel_power_on_time = ktime_get_boottime(); 2656 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2657 2658 /* When we disable the VDD override bit last we have to do the manual 2659 * wait. */ 2660 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2661 wait_remaining_ms_from_jiffies(jiffies, 2662 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2663 2664 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2665 } 2666 2667 static void wait_backlight_on(struct intel_dp *intel_dp) 2668 { 2669 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2670 intel_dp->backlight_on_delay); 2671 } 2672 2673 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2674 { 2675 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2676 intel_dp->backlight_off_delay); 2677 } 2678 2679 /* Read the current pp_control value, unlocking the register if it 2680 * is locked 2681 */ 2682 2683 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2684 { 2685 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2686 u32 control; 2687 2688 lockdep_assert_held(&dev_priv->pps_mutex); 2689 2690 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 2691 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 2692 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2693 control &= ~PANEL_UNLOCK_MASK; 2694 control |= PANEL_UNLOCK_REGS; 2695 } 2696 return control; 2697 } 2698 2699 /* 2700 * Must be paired with edp_panel_vdd_off(). 2701 * Must hold pps_mutex around the whole on/off sequence. 2702 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
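 * Returns true if VDD was not already requested when called, i.e. if this
 * call is the one that actually forced VDD on and the caller owes a
 * matching disable.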
2703 */ 2704 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2705 { 2706 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2708 u32 pp; 2709 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2710 bool need_to_disable = !intel_dp->want_panel_vdd; 2711 2712 lockdep_assert_held(&dev_priv->pps_mutex); 2713 2714 if (!intel_dp_is_edp(intel_dp)) 2715 return false; 2716 2717 cancel_delayed_work(&intel_dp->panel_vdd_work); 2718 intel_dp->want_panel_vdd = true; 2719 2720 if (edp_have_panel_vdd(intel_dp)) 2721 return need_to_disable; 2722 2723 intel_display_power_get(dev_priv, 2724 intel_aux_power_domain(intel_dig_port)); 2725 2726 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 2727 intel_dig_port->base.base.base.id, 2728 intel_dig_port->base.base.name); 2729 2730 if (!edp_have_panel_power(intel_dp)) 2731 wait_panel_power_cycle(intel_dp); 2732 2733 pp = ilk_get_pp_control(intel_dp); 2734 pp |= EDP_FORCE_VDD; 2735 2736 pp_stat_reg = _pp_stat_reg(intel_dp); 2737 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2738 2739 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2740 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2741 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2742 intel_de_read(dev_priv, pp_stat_reg), 2743 intel_de_read(dev_priv, pp_ctrl_reg)); 2744 /* 2745 * If the panel wasn't on, delay before accessing aux channel 2746 */ 2747 if (!edp_have_panel_power(intel_dp)) { 2748 drm_dbg_kms(&dev_priv->drm, 2749 "[ENCODER:%d:%s] panel power wasn't enabled\n", 2750 intel_dig_port->base.base.base.id, 2751 intel_dig_port->base.base.name); 2752 msleep(intel_dp->panel_power_up_delay); 2753 } 2754 2755 return need_to_disable; 2756 } 2757 2758 /* 2759 * Must be paired with intel_edp_panel_vdd_off() or 2760 * intel_edp_panel_off(). 2761 * Nested calls to these functions are not allowed since 2762 * we drop the lock. Caller must use some higher level 2763 * locking to prevent nested calls from other threads. 
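 * Warns if VDD had already been requested for this port when called.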
2764 */ 2765 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2766 { 2767 intel_wakeref_t wakeref; 2768 bool vdd; 2769 2770 if (!intel_dp_is_edp(intel_dp)) 2771 return; 2772 2773 vdd = false; 2774 with_pps_lock(intel_dp, wakeref) 2775 vdd = edp_panel_vdd_on(intel_dp); 2776 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2777 dp_to_dig_port(intel_dp)->base.base.base.id, 2778 dp_to_dig_port(intel_dp)->base.base.name); 2779 } 2780 2781 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2782 { 2783 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2784 struct intel_digital_port *intel_dig_port = 2785 dp_to_dig_port(intel_dp); 2786 u32 pp; 2787 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2788 2789 lockdep_assert_held(&dev_priv->pps_mutex); 2790 2791 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 2792 2793 if (!edp_have_panel_vdd(intel_dp)) 2794 return; 2795 2796 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 2797 intel_dig_port->base.base.base.id, 2798 intel_dig_port->base.base.name); 2799 2800 pp = ilk_get_pp_control(intel_dp); 2801 pp &= ~EDP_FORCE_VDD; 2802 2803 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2804 pp_stat_reg = _pp_stat_reg(intel_dp); 2805 2806 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2807 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2808 2809 /* Make sure sequencer is idle before allowing subsequent activity */ 2810 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2811 intel_de_read(dev_priv, pp_stat_reg), 2812 intel_de_read(dev_priv, pp_ctrl_reg)); 2813 2814 if ((pp & PANEL_POWER_ON) == 0) 2815 intel_dp->panel_power_off_time = ktime_get_boottime(); 2816 2817 intel_display_power_put_unchecked(dev_priv, 2818 intel_aux_power_domain(intel_dig_port)); 2819 } 2820 2821 static void edp_panel_vdd_work(struct work_struct *__work) 2822 { 2823 struct intel_dp *intel_dp = 2824 container_of(to_delayed_work(__work), 2825 struct intel_dp, panel_vdd_work); 2826 intel_wakeref_t wakeref; 2827 2828 with_pps_lock(intel_dp, wakeref) { 2829 if (!intel_dp->want_panel_vdd) 2830 edp_panel_vdd_off_sync(intel_dp); 2831 } 2832 } 2833 2834 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 2835 { 2836 unsigned long delay; 2837 2838 /* 2839 * Queue the timer to fire a long time from now (relative to the power 2840 * down delay) to keep the panel power up across a sequence of 2841 * operations. 2842 */ 2843 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 2844 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 2845 } 2846 2847 /* 2848 * Must be paired with edp_panel_vdd_on(). 2849 * Must hold pps_mutex around the whole on/off sequence. 2850 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
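 * With sync == false the actual VDD off is deferred to a delayed work
 * (several power cycle delays from now) so VDD can stay up across a burst
 * of operations.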
2851 */ 2852 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 2853 { 2854 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2855 2856 lockdep_assert_held(&dev_priv->pps_mutex); 2857 2858 if (!intel_dp_is_edp(intel_dp)) 2859 return; 2860 2861 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 2862 dp_to_dig_port(intel_dp)->base.base.base.id, 2863 dp_to_dig_port(intel_dp)->base.base.name); 2864 2865 intel_dp->want_panel_vdd = false; 2866 2867 if (sync) 2868 edp_panel_vdd_off_sync(intel_dp); 2869 else 2870 edp_panel_vdd_schedule_off(intel_dp); 2871 } 2872 2873 static void edp_panel_on(struct intel_dp *intel_dp) 2874 { 2875 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2876 u32 pp; 2877 i915_reg_t pp_ctrl_reg; 2878 2879 lockdep_assert_held(&dev_priv->pps_mutex); 2880 2881 if (!intel_dp_is_edp(intel_dp)) 2882 return; 2883 2884 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 2885 dp_to_dig_port(intel_dp)->base.base.base.id, 2886 dp_to_dig_port(intel_dp)->base.base.name); 2887 2888 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 2889 "[ENCODER:%d:%s] panel power already on\n", 2890 dp_to_dig_port(intel_dp)->base.base.base.id, 2891 dp_to_dig_port(intel_dp)->base.base.name)) 2892 return; 2893 2894 wait_panel_power_cycle(intel_dp); 2895 2896 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2897 pp = ilk_get_pp_control(intel_dp); 2898 if (IS_GEN(dev_priv, 5)) { 2899 /* ILK workaround: disable reset around power sequence */ 2900 pp &= ~PANEL_POWER_RESET; 2901 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2902 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2903 } 2904 2905 pp |= PANEL_POWER_ON; 2906 if (!IS_GEN(dev_priv, 5)) 2907 pp |= PANEL_POWER_RESET; 2908 2909 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2910 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2911 2912 wait_panel_on(intel_dp); 2913 intel_dp->last_power_on = jiffies; 2914 2915 if (IS_GEN(dev_priv, 5)) { 2916 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 2917 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2918 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2919 } 2920 } 2921 2922 void intel_edp_panel_on(struct intel_dp *intel_dp) 2923 { 2924 intel_wakeref_t wakeref; 2925 2926 if (!intel_dp_is_edp(intel_dp)) 2927 return; 2928 2929 with_pps_lock(intel_dp, wakeref) 2930 edp_panel_on(intel_dp); 2931 } 2932 2933 2934 static void edp_panel_off(struct intel_dp *intel_dp) 2935 { 2936 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2937 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2938 u32 pp; 2939 i915_reg_t pp_ctrl_reg; 2940 2941 lockdep_assert_held(&dev_priv->pps_mutex); 2942 2943 if (!intel_dp_is_edp(intel_dp)) 2944 return; 2945 2946 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 2947 dig_port->base.base.base.id, dig_port->base.base.name); 2948 2949 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 2950 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 2951 dig_port->base.base.base.id, dig_port->base.base.name); 2952 2953 pp = ilk_get_pp_control(intel_dp); 2954 /* We need to switch off panel power _and_ force vdd, for otherwise some 2955 * panels get very unhappy and cease to work. 
*/ 2956 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 2957 EDP_BLC_ENABLE); 2958 2959 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2960 2961 intel_dp->want_panel_vdd = false; 2962 2963 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2964 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2965 2966 wait_panel_off(intel_dp); 2967 intel_dp->panel_power_off_time = ktime_get_boottime(); 2968 2969 /* We got a reference when we enabled the VDD. */ 2970 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 2971 } 2972 2973 void intel_edp_panel_off(struct intel_dp *intel_dp) 2974 { 2975 intel_wakeref_t wakeref; 2976 2977 if (!intel_dp_is_edp(intel_dp)) 2978 return; 2979 2980 with_pps_lock(intel_dp, wakeref) 2981 edp_panel_off(intel_dp); 2982 } 2983 2984 /* Enable backlight in the panel power control. */ 2985 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 2986 { 2987 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2988 intel_wakeref_t wakeref; 2989 2990 /* 2991 * If we enable the backlight right away following a panel power 2992 * on, we may see slight flicker as the panel syncs with the eDP 2993 * link. So delay a bit to make sure the image is solid before 2994 * allowing it to appear. 2995 */ 2996 wait_backlight_on(intel_dp); 2997 2998 with_pps_lock(intel_dp, wakeref) { 2999 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3000 u32 pp; 3001 3002 pp = ilk_get_pp_control(intel_dp); 3003 pp |= EDP_BLC_ENABLE; 3004 3005 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3006 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3007 } 3008 } 3009 3010 /* Enable backlight PWM and backlight PP control. */ 3011 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3012 const struct drm_connector_state *conn_state) 3013 { 3014 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3015 3016 if (!intel_dp_is_edp(intel_dp)) 3017 return; 3018 3019 DRM_DEBUG_KMS("\n"); 3020 3021 intel_panel_enable_backlight(crtc_state, conn_state); 3022 _intel_edp_backlight_on(intel_dp); 3023 } 3024 3025 /* Disable backlight in the panel power control. */ 3026 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3027 { 3028 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3029 intel_wakeref_t wakeref; 3030 3031 if (!intel_dp_is_edp(intel_dp)) 3032 return; 3033 3034 with_pps_lock(intel_dp, wakeref) { 3035 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3036 u32 pp; 3037 3038 pp = ilk_get_pp_control(intel_dp); 3039 pp &= ~EDP_BLC_ENABLE; 3040 3041 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3042 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3043 } 3044 3045 intel_dp->last_backlight_off = jiffies; 3046 edp_wait_backlight_off(intel_dp); 3047 } 3048 3049 /* Disable backlight PP control and backlight PWM. */ 3050 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3051 { 3052 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3053 3054 if (!intel_dp_is_edp(intel_dp)) 3055 return; 3056 3057 DRM_DEBUG_KMS("\n"); 3058 3059 _intel_edp_backlight_off(intel_dp); 3060 intel_panel_disable_backlight(old_conn_state); 3061 } 3062 3063 /* 3064 * Hook for controlling the panel power control backlight through the bl_power 3065 * sysfs attribute. Take care to handle multiple calls. 
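 * The current backlight state is read back from the PP control register so
 * repeated calls requesting the same state are no-ops.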
3066 */ 3067 static void intel_edp_backlight_power(struct intel_connector *connector, 3068 bool enable) 3069 { 3070 struct intel_dp *intel_dp = intel_attached_dp(connector); 3071 intel_wakeref_t wakeref; 3072 bool is_enabled; 3073 3074 is_enabled = false; 3075 with_pps_lock(intel_dp, wakeref) 3076 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3077 if (is_enabled == enable) 3078 return; 3079 3080 DRM_DEBUG_KMS("panel power control backlight %s\n", 3081 enable ? "enable" : "disable"); 3082 3083 if (enable) 3084 _intel_edp_backlight_on(intel_dp); 3085 else 3086 _intel_edp_backlight_off(intel_dp); 3087 } 3088 3089 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3090 { 3091 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3092 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3093 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3094 3095 I915_STATE_WARN(cur_state != state, 3096 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3097 dig_port->base.base.base.id, dig_port->base.base.name, 3098 onoff(state), onoff(cur_state)); 3099 } 3100 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3101 3102 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3103 { 3104 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3105 3106 I915_STATE_WARN(cur_state != state, 3107 "eDP PLL state assertion failure (expected %s, current %s)\n", 3108 onoff(state), onoff(cur_state)); 3109 } 3110 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3111 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3112 3113 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3114 const struct intel_crtc_state *pipe_config) 3115 { 3116 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3117 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3118 3119 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3120 assert_dp_port_disabled(intel_dp); 3121 assert_edp_pll_disabled(dev_priv); 3122 3123 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3124 pipe_config->port_clock); 3125 3126 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3127 3128 if (pipe_config->port_clock == 162000) 3129 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3130 else 3131 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3132 3133 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3134 intel_de_posting_read(dev_priv, DP_A); 3135 udelay(500); 3136 3137 /* 3138 * [DevILK] Work around required when enabling DP PLL 3139 * while a pipe is enabled going to FDI: 3140 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3141 * 2. 
Program DP PLL enable 3142 */ 3143 if (IS_GEN(dev_priv, 5)) 3144 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3145 3146 intel_dp->DP |= DP_PLL_ENABLE; 3147 3148 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3149 intel_de_posting_read(dev_priv, DP_A); 3150 udelay(200); 3151 } 3152 3153 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3154 const struct intel_crtc_state *old_crtc_state) 3155 { 3156 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3157 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3158 3159 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3160 assert_dp_port_disabled(intel_dp); 3161 assert_edp_pll_enabled(dev_priv); 3162 3163 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3164 3165 intel_dp->DP &= ~DP_PLL_ENABLE; 3166 3167 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3168 intel_de_posting_read(dev_priv, DP_A); 3169 udelay(200); 3170 } 3171 3172 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3173 { 3174 /* 3175 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3176 * be capable of signalling downstream hpd with a long pulse. 3177 * Whether or not that means D3 is safe to use is not clear, 3178 * but let's assume so until proven otherwise. 3179 * 3180 * FIXME should really check all downstream ports... 3181 */ 3182 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3183 drm_dp_is_branch(intel_dp->dpcd) && 3184 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3185 } 3186 3187 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3188 const struct intel_crtc_state *crtc_state, 3189 bool enable) 3190 { 3191 int ret; 3192 3193 if (!crtc_state->dsc.compression_enable) 3194 return; 3195 3196 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3197 enable ? DP_DECOMPRESSION_EN : 0); 3198 if (ret < 0) 3199 DRM_DEBUG_KMS("Failed to %s sink decompression state\n", 3200 enable ? "enable" : "disable"); 3201 } 3202 3203 /* If the sink supports it, try to set the power state appropriately */ 3204 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3205 { 3206 int ret, i; 3207 3208 /* Should have a valid DPCD by this point */ 3209 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3210 return; 3211 3212 if (mode != DRM_MODE_DPMS_ON) { 3213 if (downstream_hpd_needs_d0(intel_dp)) 3214 return; 3215 3216 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3217 DP_SET_POWER_D3); 3218 } else { 3219 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3220 3221 /* 3222 * When turning on, we need to retry for 1ms to give the sink 3223 * time to wake up. 3224 */ 3225 for (i = 0; i < 3; i++) { 3226 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3227 DP_SET_POWER_D0); 3228 if (ret == 1) 3229 break; 3230 msleep(1); 3231 } 3232 3233 if (ret == 1 && lspcon->active) 3234 lspcon_wait_pcon_mode(lspcon); 3235 } 3236 3237 if (ret != 1) 3238 DRM_DEBUG_KMS("failed to %s sink power state\n", 3239 mode == DRM_MODE_DPMS_ON ? 
"enable" : "disable"); 3240 } 3241 3242 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3243 enum port port, enum pipe *pipe) 3244 { 3245 enum pipe p; 3246 3247 for_each_pipe(dev_priv, p) { 3248 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3249 3250 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3251 *pipe = p; 3252 return true; 3253 } 3254 } 3255 3256 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3257 port_name(port)); 3258 3259 /* must initialize pipe to something for the asserts */ 3260 *pipe = PIPE_A; 3261 3262 return false; 3263 } 3264 3265 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3266 i915_reg_t dp_reg, enum port port, 3267 enum pipe *pipe) 3268 { 3269 bool ret; 3270 u32 val; 3271 3272 val = intel_de_read(dev_priv, dp_reg); 3273 3274 ret = val & DP_PORT_EN; 3275 3276 /* asserts want to know the pipe even if the port is disabled */ 3277 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3278 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3279 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3280 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3281 else if (IS_CHERRYVIEW(dev_priv)) 3282 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3283 else 3284 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3285 3286 return ret; 3287 } 3288 3289 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3290 enum pipe *pipe) 3291 { 3292 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3293 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3294 intel_wakeref_t wakeref; 3295 bool ret; 3296 3297 wakeref = intel_display_power_get_if_enabled(dev_priv, 3298 encoder->power_domain); 3299 if (!wakeref) 3300 return false; 3301 3302 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3303 encoder->port, pipe); 3304 3305 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3306 3307 return ret; 3308 } 3309 3310 static void intel_dp_get_config(struct intel_encoder *encoder, 3311 struct intel_crtc_state *pipe_config) 3312 { 3313 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3314 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3315 u32 tmp, flags = 0; 3316 enum port port = encoder->port; 3317 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3318 3319 if (encoder->type == INTEL_OUTPUT_EDP) 3320 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3321 else 3322 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3323 3324 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3325 3326 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3327 3328 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3329 u32 trans_dp = intel_de_read(dev_priv, 3330 TRANS_DP_CTL(crtc->pipe)); 3331 3332 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3333 flags |= DRM_MODE_FLAG_PHSYNC; 3334 else 3335 flags |= DRM_MODE_FLAG_NHSYNC; 3336 3337 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3338 flags |= DRM_MODE_FLAG_PVSYNC; 3339 else 3340 flags |= DRM_MODE_FLAG_NVSYNC; 3341 } else { 3342 if (tmp & DP_SYNC_HS_HIGH) 3343 flags |= DRM_MODE_FLAG_PHSYNC; 3344 else 3345 flags |= DRM_MODE_FLAG_NHSYNC; 3346 3347 if (tmp & DP_SYNC_VS_HIGH) 3348 flags |= DRM_MODE_FLAG_PVSYNC; 3349 else 3350 flags |= DRM_MODE_FLAG_NVSYNC; 3351 } 3352 3353 pipe_config->hw.adjusted_mode.flags |= flags; 3354 3355 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3356 pipe_config->limited_color_range = true; 3357 3358 pipe_config->lane_count = 3359 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3360 3361 intel_dp_get_m_n(crtc, pipe_config); 3362 3363 if (port == PORT_A) { 3364 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3365 pipe_config->port_clock = 162000; 3366 else 3367 pipe_config->port_clock = 270000; 3368 } 3369 3370 pipe_config->hw.adjusted_mode.crtc_clock = 3371 intel_dotclock_calculate(pipe_config->port_clock, 3372 &pipe_config->dp_m_n); 3373 3374 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3375 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3376 /* 3377 * This is a big fat ugly hack. 3378 * 3379 * Some machines in UEFI boot mode provide us a VBT that has 18 3380 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3381 * unknown we fail to light up. Yet the same BIOS boots up with 3382 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3383 * max, not what it tells us to use. 3384 * 3385 * Note: This will still be broken if the eDP panel is not lit 3386 * up by the BIOS, and thus we can't get the mode at module 3387 * load. 3388 */ 3389 drm_dbg_kms(&dev_priv->drm, 3390 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3391 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3392 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3393 } 3394 } 3395 3396 static void intel_disable_dp(struct intel_encoder *encoder, 3397 const struct intel_crtc_state *old_crtc_state, 3398 const struct drm_connector_state *old_conn_state) 3399 { 3400 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3401 3402 intel_dp->link_trained = false; 3403 3404 if (old_crtc_state->has_audio) 3405 intel_audio_codec_disable(encoder, 3406 old_crtc_state, old_conn_state); 3407 3408 /* Make sure the panel is off before trying to change the mode. But also 3409 * ensure that we have vdd while we switch off the panel. */ 3410 intel_edp_panel_vdd_on(intel_dp); 3411 intel_edp_backlight_off(old_conn_state); 3412 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3413 intel_edp_panel_off(intel_dp); 3414 } 3415 3416 static void g4x_disable_dp(struct intel_encoder *encoder, 3417 const struct intel_crtc_state *old_crtc_state, 3418 const struct drm_connector_state *old_conn_state) 3419 { 3420 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 3421 } 3422 3423 static void vlv_disable_dp(struct intel_encoder *encoder, 3424 const struct intel_crtc_state *old_crtc_state, 3425 const struct drm_connector_state *old_conn_state) 3426 { 3427 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 3428 } 3429 3430 static void g4x_post_disable_dp(struct intel_encoder *encoder, 3431 const struct intel_crtc_state *old_crtc_state, 3432 const struct drm_connector_state *old_conn_state) 3433 { 3434 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3435 enum port port = encoder->port; 3436 3437 /* 3438 * Bspec does not list a specific disable sequence for g4x DP. 3439 * Follow the ilk+ sequence (disable pipe before the port) for 3440 * g4x DP as it does not suffer from underruns like the normal 3441 * g4x modeset sequence (disable pipe after the port). 
3442 */ 3443 intel_dp_link_down(encoder, old_crtc_state); 3444 3445 /* Only ilk+ has port A */ 3446 if (port == PORT_A) 3447 ilk_edp_pll_off(intel_dp, old_crtc_state); 3448 } 3449 3450 static void vlv_post_disable_dp(struct intel_encoder *encoder, 3451 const struct intel_crtc_state *old_crtc_state, 3452 const struct drm_connector_state *old_conn_state) 3453 { 3454 intel_dp_link_down(encoder, old_crtc_state); 3455 } 3456 3457 static void chv_post_disable_dp(struct intel_encoder *encoder, 3458 const struct intel_crtc_state *old_crtc_state, 3459 const struct drm_connector_state *old_conn_state) 3460 { 3461 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3462 3463 intel_dp_link_down(encoder, old_crtc_state); 3464 3465 vlv_dpio_get(dev_priv); 3466 3467 /* Assert data lane reset */ 3468 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3469 3470 vlv_dpio_put(dev_priv); 3471 } 3472 3473 static void 3474 _intel_dp_set_link_train(struct intel_dp *intel_dp, 3475 u32 *DP, 3476 u8 dp_train_pat) 3477 { 3478 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3479 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3480 enum port port = intel_dig_port->base.port; 3481 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 3482 3483 if (dp_train_pat & train_pat_mask) 3484 drm_dbg_kms(&dev_priv->drm, 3485 "Using DP training pattern TPS%d\n", 3486 dp_train_pat & train_pat_mask); 3487 3488 if (HAS_DDI(dev_priv)) { 3489 u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); 3490 3491 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 3492 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 3493 else 3494 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 3495 3496 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 3497 switch (dp_train_pat & train_pat_mask) { 3498 case DP_TRAINING_PATTERN_DISABLE: 3499 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 3500 3501 break; 3502 case DP_TRAINING_PATTERN_1: 3503 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 3504 break; 3505 case DP_TRAINING_PATTERN_2: 3506 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 3507 break; 3508 case DP_TRAINING_PATTERN_3: 3509 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 3510 break; 3511 case DP_TRAINING_PATTERN_4: 3512 temp |= DP_TP_CTL_LINK_TRAIN_PAT4; 3513 break; 3514 } 3515 intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp); 3516 3517 } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 3518 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 3519 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3520 3521 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3522 case DP_TRAINING_PATTERN_DISABLE: 3523 *DP |= DP_LINK_TRAIN_OFF_CPT; 3524 break; 3525 case DP_TRAINING_PATTERN_1: 3526 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3527 break; 3528 case DP_TRAINING_PATTERN_2: 3529 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3530 break; 3531 case DP_TRAINING_PATTERN_3: 3532 drm_dbg_kms(&dev_priv->drm, 3533 "TPS3 not supported, using TPS2 instead\n"); 3534 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3535 break; 3536 } 3537 3538 } else { 3539 *DP &= ~DP_LINK_TRAIN_MASK; 3540 3541 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3542 case DP_TRAINING_PATTERN_DISABLE: 3543 *DP |= DP_LINK_TRAIN_OFF; 3544 break; 3545 case DP_TRAINING_PATTERN_1: 3546 *DP |= DP_LINK_TRAIN_PAT_1; 3547 break; 3548 case DP_TRAINING_PATTERN_2: 3549 *DP |= DP_LINK_TRAIN_PAT_2; 3550 break; 3551 case DP_TRAINING_PATTERN_3: 3552 drm_dbg_kms(&dev_priv->drm, 3553 "TPS3 not supported, using TPS2 instead\n"); 3554 *DP |= DP_LINK_TRAIN_PAT_2; 3555 break; 3556 } 3557 } 3558 } 3559 3560 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3561 const struct 
intel_crtc_state *old_crtc_state) 3562 { 3563 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3564 3565 /* enable with pattern 1 (as per spec) */ 3566 3567 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3568 3569 /* 3570 * Magic for VLV/CHV. We _must_ first set up the register 3571 * without actually enabling the port, and then do another 3572 * write to enable the port. Otherwise link training will 3573 * fail when the power sequencer is freshly used for this port. 3574 */ 3575 intel_dp->DP |= DP_PORT_EN; 3576 if (old_crtc_state->has_audio) 3577 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3578 3579 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3580 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3581 } 3582 3583 static void intel_enable_dp(struct intel_encoder *encoder, 3584 const struct intel_crtc_state *pipe_config, 3585 const struct drm_connector_state *conn_state) 3586 { 3587 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3588 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3589 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3590 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3591 enum pipe pipe = crtc->pipe; 3592 intel_wakeref_t wakeref; 3593 3594 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3595 return; 3596 3597 with_pps_lock(intel_dp, wakeref) { 3598 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3599 vlv_init_panel_power_sequencer(encoder, pipe_config); 3600 3601 intel_dp_enable_port(intel_dp, pipe_config); 3602 3603 edp_panel_vdd_on(intel_dp); 3604 edp_panel_on(intel_dp); 3605 edp_panel_vdd_off(intel_dp, true); 3606 } 3607 3608 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3609 unsigned int lane_mask = 0x0; 3610 3611 if (IS_CHERRYVIEW(dev_priv)) 3612 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3613 3614 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3615 lane_mask); 3616 } 3617 3618 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3619 intel_dp_start_link_train(intel_dp); 3620 intel_dp_stop_link_train(intel_dp); 3621 3622 if (pipe_config->has_audio) { 3623 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3624 pipe_name(pipe)); 3625 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3626 } 3627 } 3628 3629 static void g4x_enable_dp(struct intel_encoder *encoder, 3630 const struct intel_crtc_state *pipe_config, 3631 const struct drm_connector_state *conn_state) 3632 { 3633 intel_enable_dp(encoder, pipe_config, conn_state); 3634 intel_edp_backlight_on(pipe_config, conn_state); 3635 } 3636 3637 static void vlv_enable_dp(struct intel_encoder *encoder, 3638 const struct intel_crtc_state *pipe_config, 3639 const struct drm_connector_state *conn_state) 3640 { 3641 intel_edp_backlight_on(pipe_config, conn_state); 3642 } 3643 3644 static void g4x_pre_enable_dp(struct intel_encoder *encoder, 3645 const struct intel_crtc_state *pipe_config, 3646 const struct drm_connector_state *conn_state) 3647 { 3648 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3649 enum port port = encoder->port; 3650 3651 intel_dp_prepare(encoder, pipe_config); 3652 3653 /* Only ilk+ has port A */ 3654 if (port == PORT_A) 3655 ilk_edp_pll_on(intel_dp, pipe_config); 3656 } 3657 3658 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3659 { 3660 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3661 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 3662 enum pipe pipe = 
intel_dp->pps_pipe; 3663 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3664 3665 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3666 3667 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 3668 return; 3669 3670 edp_panel_vdd_off_sync(intel_dp); 3671 3672 /* 3673 * VLV seems to get confused when multiple power sequencers 3674 * have the same port selected (even if only one has power/vdd 3675 * enabled). The failure manifests as vlv_wait_port_ready() failing. 3676 * CHV, on the other hand, doesn't seem to mind having the same port 3677 * selected in multiple power sequencers, but let's clear the 3678 * port select always when logically disconnecting a power sequencer 3679 * from a port. 3680 */ 3681 drm_dbg_kms(&dev_priv->drm, 3682 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", 3683 pipe_name(pipe), intel_dig_port->base.base.base.id, 3684 intel_dig_port->base.base.name); 3685 intel_de_write(dev_priv, pp_on_reg, 0); 3686 intel_de_posting_read(dev_priv, pp_on_reg); 3687 3688 intel_dp->pps_pipe = INVALID_PIPE; 3689 } 3690 3691 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 3692 enum pipe pipe) 3693 { 3694 struct intel_encoder *encoder; 3695 3696 lockdep_assert_held(&dev_priv->pps_mutex); 3697 3698 for_each_intel_dp(&dev_priv->drm, encoder) { 3699 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3700 3701 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe, 3702 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", 3703 pipe_name(pipe), encoder->base.base.id, 3704 encoder->base.name); 3705 3706 if (intel_dp->pps_pipe != pipe) 3707 continue; 3708 3709 drm_dbg_kms(&dev_priv->drm, 3710 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", 3711 pipe_name(pipe), encoder->base.base.id, 3712 encoder->base.name); 3713 3714 /* make sure vdd is off before we steal it */ 3715 vlv_detach_power_sequencer(intel_dp); 3716 } 3717 } 3718 3719 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 3720 const struct intel_crtc_state *crtc_state) 3721 { 3722 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3723 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3724 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3725 3726 lockdep_assert_held(&dev_priv->pps_mutex); 3727 3728 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3729 3730 if (intel_dp->pps_pipe != INVALID_PIPE && 3731 intel_dp->pps_pipe != crtc->pipe) { 3732 /* 3733 * If another power sequencer was being used on this 3734 * port previously make sure to turn off vdd there while 3735 * we still have control of it. 3736 */ 3737 vlv_detach_power_sequencer(intel_dp); 3738 } 3739 3740 /* 3741 * We may be stealing the power 3742 * sequencer from another port.
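 * vlv_steal_power_sequencer() below detaches this pipe's sequencer from any
 * other encoder that currently owns it (turning its VDD off first) before
 * we claim it here.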
3743 */ 3744 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3745 3746 intel_dp->active_pipe = crtc->pipe; 3747 3748 if (!intel_dp_is_edp(intel_dp)) 3749 return; 3750 3751 /* now it's all ours */ 3752 intel_dp->pps_pipe = crtc->pipe; 3753 3754 drm_dbg_kms(&dev_priv->drm, 3755 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3756 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3757 encoder->base.name); 3758 3759 /* init power sequencer on this pipe and port */ 3760 intel_dp_init_panel_power_sequencer(intel_dp); 3761 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3762 } 3763 3764 static void vlv_pre_enable_dp(struct intel_encoder *encoder, 3765 const struct intel_crtc_state *pipe_config, 3766 const struct drm_connector_state *conn_state) 3767 { 3768 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3769 3770 intel_enable_dp(encoder, pipe_config, conn_state); 3771 } 3772 3773 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, 3774 const struct intel_crtc_state *pipe_config, 3775 const struct drm_connector_state *conn_state) 3776 { 3777 intel_dp_prepare(encoder, pipe_config); 3778 3779 vlv_phy_pre_pll_enable(encoder, pipe_config); 3780 } 3781 3782 static void chv_pre_enable_dp(struct intel_encoder *encoder, 3783 const struct intel_crtc_state *pipe_config, 3784 const struct drm_connector_state *conn_state) 3785 { 3786 chv_phy_pre_encoder_enable(encoder, pipe_config); 3787 3788 intel_enable_dp(encoder, pipe_config, conn_state); 3789 3790 /* Second common lane will stay alive on its own now */ 3791 chv_phy_release_cl2_override(encoder); 3792 } 3793 3794 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, 3795 const struct intel_crtc_state *pipe_config, 3796 const struct drm_connector_state *conn_state) 3797 { 3798 intel_dp_prepare(encoder, pipe_config); 3799 3800 chv_phy_pre_pll_enable(encoder, pipe_config); 3801 } 3802 3803 static void chv_dp_post_pll_disable(struct intel_encoder *encoder, 3804 const struct intel_crtc_state *old_crtc_state, 3805 const struct drm_connector_state *old_conn_state) 3806 { 3807 chv_phy_post_pll_disable(encoder, old_crtc_state); 3808 } 3809 3810 /* 3811 * Fetch AUX CH registers 0x202 - 0x207 which contain 3812 * link status information 3813 */ 3814 bool 3815 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3816 { 3817 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3818 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 3819 } 3820 3821 /* These are source-specific values. 
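They give the maximum voltage swing and pre-emphasis levels the source (this platform and port) can drive; link training uses them to clamp the levels requested by the sink.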
*/ 3822 u8 3823 intel_dp_voltage_max(struct intel_dp *intel_dp) 3824 { 3825 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3826 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3827 enum port port = encoder->port; 3828 3829 if (HAS_DDI(dev_priv)) 3830 return intel_ddi_dp_voltage_max(encoder); 3831 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3832 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3833 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3834 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3835 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3836 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3837 else 3838 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 3839 } 3840 3841 u8 3842 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) 3843 { 3844 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3845 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3846 enum port port = encoder->port; 3847 3848 if (HAS_DDI(dev_priv)) { 3849 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); 3850 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3851 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3853 return DP_TRAIN_PRE_EMPH_LEVEL_3; 3854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3855 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3857 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3859 default: 3860 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3861 } 3862 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 3863 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3864 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3865 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3866 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3867 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3868 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3869 default: 3870 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3871 } 3872 } else { 3873 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 3874 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3875 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3876 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3877 return DP_TRAIN_PRE_EMPH_LEVEL_2; 3878 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3879 return DP_TRAIN_PRE_EMPH_LEVEL_1; 3880 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3881 default: 3882 return DP_TRAIN_PRE_EMPH_LEVEL_0; 3883 } 3884 } 3885 } 3886 3887 static u32 vlv_signal_levels(struct intel_dp *intel_dp) 3888 { 3889 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3890 unsigned long demph_reg_value, preemph_reg_value, 3891 uniqtranscale_reg_value; 3892 u8 train_set = intel_dp->train_set[0]; 3893 3894 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3895 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3896 preemph_reg_value = 0x0004000; 3897 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3898 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3899 demph_reg_value = 0x2B405555; 3900 uniqtranscale_reg_value = 0x552AB83A; 3901 break; 3902 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3903 demph_reg_value = 0x2B404040; 3904 uniqtranscale_reg_value = 0x5548B83A; 3905 break; 3906 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3907 demph_reg_value = 0x2B245555; 3908 uniqtranscale_reg_value = 0x5560B83A; 3909 break; 3910 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3911 demph_reg_value = 0x2B405555; 3912 uniqtranscale_reg_value = 0x5598DA3A; 3913 break; 3914 default: 3915 return 0; 3916 } 3917 break; 3918 case DP_TRAIN_PRE_EMPH_LEVEL_1: 3919 preemph_reg_value = 0x0002000; 3920 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3921 case 
DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3922 demph_reg_value = 0x2B404040; 3923 uniqtranscale_reg_value = 0x5552B83A; 3924 break; 3925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3926 demph_reg_value = 0x2B404848; 3927 uniqtranscale_reg_value = 0x5580B83A; 3928 break; 3929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3930 demph_reg_value = 0x2B404040; 3931 uniqtranscale_reg_value = 0x55ADDA3A; 3932 break; 3933 default: 3934 return 0; 3935 } 3936 break; 3937 case DP_TRAIN_PRE_EMPH_LEVEL_2: 3938 preemph_reg_value = 0x0000000; 3939 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3940 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3941 demph_reg_value = 0x2B305555; 3942 uniqtranscale_reg_value = 0x5570B83A; 3943 break; 3944 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3945 demph_reg_value = 0x2B2B4040; 3946 uniqtranscale_reg_value = 0x55ADDA3A; 3947 break; 3948 default: 3949 return 0; 3950 } 3951 break; 3952 case DP_TRAIN_PRE_EMPH_LEVEL_3: 3953 preemph_reg_value = 0x0006000; 3954 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3956 demph_reg_value = 0x1B405555; 3957 uniqtranscale_reg_value = 0x55ADDA3A; 3958 break; 3959 default: 3960 return 0; 3961 } 3962 break; 3963 default: 3964 return 0; 3965 } 3966 3967 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 3968 uniqtranscale_reg_value, 0); 3969 3970 return 0; 3971 } 3972 3973 static u32 chv_signal_levels(struct intel_dp *intel_dp) 3974 { 3975 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3976 u32 deemph_reg_value, margin_reg_value; 3977 bool uniq_trans_scale = false; 3978 u8 train_set = intel_dp->train_set[0]; 3979 3980 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3981 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3982 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 3984 deemph_reg_value = 128; 3985 margin_reg_value = 52; 3986 break; 3987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 3988 deemph_reg_value = 128; 3989 margin_reg_value = 77; 3990 break; 3991 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 3992 deemph_reg_value = 128; 3993 margin_reg_value = 102; 3994 break; 3995 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3996 deemph_reg_value = 128; 3997 margin_reg_value = 154; 3998 uniq_trans_scale = true; 3999 break; 4000 default: 4001 return 0; 4002 } 4003 break; 4004 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4005 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4007 deemph_reg_value = 85; 4008 margin_reg_value = 78; 4009 break; 4010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4011 deemph_reg_value = 85; 4012 margin_reg_value = 116; 4013 break; 4014 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4015 deemph_reg_value = 85; 4016 margin_reg_value = 154; 4017 break; 4018 default: 4019 return 0; 4020 } 4021 break; 4022 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4023 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4025 deemph_reg_value = 64; 4026 margin_reg_value = 104; 4027 break; 4028 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4029 deemph_reg_value = 64; 4030 margin_reg_value = 154; 4031 break; 4032 default: 4033 return 0; 4034 } 4035 break; 4036 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4037 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4039 deemph_reg_value = 43; 4040 margin_reg_value = 154; 4041 break; 4042 default: 4043 return 0; 4044 } 4045 break; 4046 default: 4047 return 0; 4048 } 4049 4050 chv_set_phy_signal_level(encoder, deemph_reg_value, 4051 margin_reg_value, uniq_trans_scale); 4052 4053 return 0; 
4054 } 4055 4056 static u32 4057 g4x_signal_levels(u8 train_set) 4058 { 4059 u32 signal_levels = 0; 4060 4061 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4062 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4063 default: 4064 signal_levels |= DP_VOLTAGE_0_4; 4065 break; 4066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4067 signal_levels |= DP_VOLTAGE_0_6; 4068 break; 4069 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4070 signal_levels |= DP_VOLTAGE_0_8; 4071 break; 4072 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4073 signal_levels |= DP_VOLTAGE_1_2; 4074 break; 4075 } 4076 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4077 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4078 default: 4079 signal_levels |= DP_PRE_EMPHASIS_0; 4080 break; 4081 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4082 signal_levels |= DP_PRE_EMPHASIS_3_5; 4083 break; 4084 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4085 signal_levels |= DP_PRE_EMPHASIS_6; 4086 break; 4087 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4088 signal_levels |= DP_PRE_EMPHASIS_9_5; 4089 break; 4090 } 4091 return signal_levels; 4092 } 4093 4094 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4095 static u32 4096 snb_cpu_edp_signal_levels(u8 train_set) 4097 { 4098 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4099 DP_TRAIN_PRE_EMPHASIS_MASK); 4100 switch (signal_levels) { 4101 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4102 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4103 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4105 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4108 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4109 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4111 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4112 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4113 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4114 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4115 default: 4116 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4117 "0x%x\n", signal_levels); 4118 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4119 } 4120 } 4121 4122 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4123 static u32 4124 ivb_cpu_edp_signal_levels(u8 train_set) 4125 { 4126 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4127 DP_TRAIN_PRE_EMPHASIS_MASK); 4128 switch (signal_levels) { 4129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4130 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4131 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4132 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4134 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4135 4136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4137 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4139 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4140 4141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4142 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4144 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4145 4146 default: 4147 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4148 "0x%x\n", signal_levels); 4149 return 
EDP_LINK_TRAIN_500MV_0DB_IVB; 4150 } 4151 } 4152 4153 void 4154 intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4155 { 4156 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4157 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4158 enum port port = intel_dig_port->base.port; 4159 u32 signal_levels, mask = 0; 4160 u8 train_set = intel_dp->train_set[0]; 4161 4162 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) { 4163 signal_levels = bxt_signal_levels(intel_dp); 4164 } else if (HAS_DDI(dev_priv)) { 4165 signal_levels = ddi_signal_levels(intel_dp); 4166 mask = DDI_BUF_EMP_MASK; 4167 } else if (IS_CHERRYVIEW(dev_priv)) { 4168 signal_levels = chv_signal_levels(intel_dp); 4169 } else if (IS_VALLEYVIEW(dev_priv)) { 4170 signal_levels = vlv_signal_levels(intel_dp); 4171 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 4172 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4173 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4174 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) { 4175 signal_levels = snb_cpu_edp_signal_levels(train_set); 4176 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4177 } else { 4178 signal_levels = g4x_signal_levels(train_set); 4179 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; 4180 } 4181 4182 if (mask) 4183 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4184 signal_levels); 4185 4186 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4187 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4188 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4189 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4190 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4191 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4192 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 4193 " (max)" : ""); 4194 4195 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; 4196 4197 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4198 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4199 } 4200 4201 void 4202 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4203 u8 dp_train_pat) 4204 { 4205 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4206 struct drm_i915_private *dev_priv = 4207 to_i915(intel_dig_port->base.base.dev); 4208 4209 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); 4210 4211 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4212 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4213 } 4214 4215 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4216 { 4217 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4218 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4219 enum port port = intel_dig_port->base.port; 4220 u32 val; 4221 4222 if (!HAS_DDI(dev_priv)) 4223 return; 4224 4225 val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); 4226 val &= ~DP_TP_CTL_LINK_TRAIN_MASK; 4227 val |= DP_TP_CTL_LINK_TRAIN_IDLE; 4228 intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); 4229 4230 /* 4231 * Until TGL on PORT_A we can have only eDP in SST mode. There the only 4232 * reason we need to set idle transmission mode is to work around a HW 4233 * issue where we enable the pipe while not in idle link-training mode. 4234 * In this case there is requirement to wait for a minimum number of 4235 * idle patterns to be sent. 
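 * In the remaining cases we wait below for DP_TP_STATUS to report
 * DP_TP_STATUS_IDLE_DONE before continuing.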
4236 */ 4237 if (port == PORT_A && INTEL_GEN(dev_priv) < 12) 4238 return; 4239 4240 if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, 4241 DP_TP_STATUS_IDLE_DONE, 1)) 4242 drm_err(&dev_priv->drm, 4243 "Timed out waiting for DP idle patterns\n"); 4244 } 4245 4246 static void 4247 intel_dp_link_down(struct intel_encoder *encoder, 4248 const struct intel_crtc_state *old_crtc_state) 4249 { 4250 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4251 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4252 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4253 enum port port = encoder->port; 4254 u32 DP = intel_dp->DP; 4255 4256 if (drm_WARN_ON(&dev_priv->drm, 4257 (intel_de_read(dev_priv, intel_dp->output_reg) & 4258 DP_PORT_EN) == 0)) 4259 return; 4260 4261 drm_dbg_kms(&dev_priv->drm, "\n"); 4262 4263 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4264 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4265 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4266 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4267 } else { 4268 DP &= ~DP_LINK_TRAIN_MASK; 4269 DP |= DP_LINK_TRAIN_PAT_IDLE; 4270 } 4271 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4272 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4273 4274 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4275 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4276 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4277 4278 /* 4279 * HW workaround for IBX, we need to move the port 4280 * to transcoder A after disabling it to allow the 4281 * matching HDMI port to be enabled on transcoder A. 4282 */ 4283 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4284 /* 4285 * We get CPU/PCH FIFO underruns on the other pipe when 4286 * doing the workaround. Sweep them under the rug. 4287 */ 4288 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4289 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4290 4291 /* always enable with pattern 1 (as per spec) */ 4292 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 4293 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 4294 DP_LINK_TRAIN_PAT_1; 4295 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4296 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4297 4298 DP &= ~DP_PORT_EN; 4299 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4300 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4301 4302 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 4303 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4304 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4305 } 4306 4307 msleep(intel_dp->panel_power_down_delay); 4308 4309 intel_dp->DP = DP; 4310 4311 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4312 intel_wakeref_t wakeref; 4313 4314 with_pps_lock(intel_dp, wakeref) 4315 intel_dp->active_pipe = INVALID_PIPE; 4316 } 4317 } 4318 4319 static void 4320 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) 4321 { 4322 u8 dpcd_ext[6]; 4323 4324 /* 4325 * Prior to DP1.3 the bit represented by 4326 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. 4327 * if it is set DP_DPCD_REV at 0000h could be at a value less than 4328 * the true capability of the panel. The only way to check is to 4329 * then compare 0000h and 2200h. 
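 * The code below therefore reads the extended capabilities at 2200h
 * (DP_DP13_DPCD_REV) and, unless they report an older revision than the
 * base capabilities, caches them in place of the 0000h values.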
4330 */ 4331 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & 4332 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) 4333 return; 4334 4335 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, 4336 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { 4337 DRM_ERROR("DPCD failed read at extended capabilities\n"); 4338 return; 4339 } 4340 4341 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { 4342 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n"); 4343 return; 4344 } 4345 4346 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) 4347 return; 4348 4349 DRM_DEBUG_KMS("Base DPCD: %*ph\n", 4350 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); 4351 4352 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); 4353 } 4354 4355 bool 4356 intel_dp_read_dpcd(struct intel_dp *intel_dp) 4357 { 4358 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 4359 sizeof(intel_dp->dpcd)) < 0) 4360 return false; /* aux transfer failed */ 4361 4362 intel_dp_extended_receiver_capabilities(intel_dp); 4363 4364 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); 4365 4366 return intel_dp->dpcd[DP_DPCD_REV] != 0; 4367 } 4368 4369 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4370 { 4371 u8 dprx = 0; 4372 4373 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4374 &dprx) != 1) 4375 return false; 4376 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4377 } 4378 4379 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 4380 { 4381 /* 4382 * Clear the cached register set to avoid using stale values 4383 * for the sinks that do not support DSC. 4384 */ 4385 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4386 4387 /* Clear fec_capable to avoid using stale values */ 4388 intel_dp->fec_capable = 0; 4389 4390 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4391 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4392 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4393 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4394 intel_dp->dsc_dpcd, 4395 sizeof(intel_dp->dsc_dpcd)) < 0) 4396 DRM_ERROR("Failed to read DPCD register 0x%x\n", 4397 DP_DSC_SUPPORT); 4398 4399 DRM_DEBUG_KMS("DSC DPCD: %*ph\n", 4400 (int)sizeof(intel_dp->dsc_dpcd), 4401 intel_dp->dsc_dpcd); 4402 4403 /* FEC is supported only on DP 1.4 */ 4404 if (!intel_dp_is_edp(intel_dp) && 4405 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4406 &intel_dp->fec_capable) < 0) 4407 DRM_ERROR("Failed to read FEC DPCD register\n"); 4408 4409 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable); 4410 } 4411 } 4412 4413 static bool 4414 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4415 { 4416 struct drm_i915_private *dev_priv = 4417 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4418 4419 /* this function is meant to be called only once */ 4420 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4421 4422 if (!intel_dp_read_dpcd(intel_dp)) 4423 return false; 4424 4425 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4426 drm_dp_is_branch(intel_dp->dpcd)); 4427 4428 /* 4429 * Read the eDP display control registers. 4430 * 4431 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4432 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4433 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4434 * method). The display control registers should read zero if they're 4435 * not supported anyway. 
 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}


static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for the sink count, which
	 * is why we don't bother reading it here or in intel_edp_init_dpcd().
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !drm_dp_has_quirk(&intel_dp->desc, 0,
			      DP_DPCD_QUIRK_NO_SINK_COUNT)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}

static bool
intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
	u8 mstm_cap;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return false;

	return mstm_cap & DP_MST_CAP;
}

static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	return i915_modparams.enable_dp_mst &&
		intel_dp->can_mst &&
		intel_dp_sink_can_mst(intel_dp);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		      encoder->base.base.id, encoder->base.name,
		      yesno(intel_dp->can_mst), yesno(sink_can_mst),
		      yesno(i915_modparams.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915_modparams.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

static void
intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct dp_sdp vsc_sdp = {};

	/* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
	vsc_sdp.sdp_header.HB0 = 0;
	vsc_sdp.sdp_header.HB1 = 0x7;

	/*
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc_sdp.sdp_header.HB2 = 0x5;

	/*
	 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
	 * Colorimetry Format indication (HB2 = 05h).
	 */
	vsc_sdp.sdp_header.HB3 = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		/* RGB: DB16[7:4] = 0h */
		break;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc_sdp.db[16] |= 0x1;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc_sdp.db[16] |= 0x2;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc_sdp.db[16] |= 0x3;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc_sdp.db[16] |= 0x4;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc_sdp.db[16] |= 0x5;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc_sdp.db[16] |= 0x6;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc_sdp.db[16] |= 0x7;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
		break;
	default:
		/* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */

		/* RGB->YCBCR color conversion uses the BT.709 color space. */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
		break;
	}

	/*
	 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
	 * the following Component Bit Depth values are defined:
	 * 001b = 8bpc.
	 * 010b = 10bpc.
	 * 011b = 12bpc.
	 * 100b = 16bpc.
	 */
	switch (crtc_state->pipe_bpp) {
	case 24: /* 8bpc */
		vsc_sdp.db[17] = 0x1;
		break;
	case 30: /* 10bpc */
		vsc_sdp.db[17] = 0x2;
		break;
	case 36: /* 12bpc */
		vsc_sdp.db[17] = 0x3;
		break;
	case 48: /* 16bpc */
		vsc_sdp.db[17] = 0x4;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	/*
	 * Dynamic Range (Bit 7)
	 * 0 = VESA range, 1 = CTA range.
	 * all YCbCr are always limited range
	 */
	vsc_sdp.db[17] |= 0x80;

	/*
	 * Content Type (Bits 2:0)
	 * 000b = Not defined.
	 * 001b = Graphics.
	 * 010b = Photo.
	 * 011b = Video.
	 * 100b = Game
	 * All other values are RESERVED.
	 * Note: See CTA-861-G for the definition and expected
	 * processing by a stream sink for the above content types.
	 */
	vsc_sdp.db[18] = 0;

	intel_dig_port->write_infoframe(&intel_dig_port->base,
			crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}

static void
intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					  const struct intel_crtc_state *crtc_state,
					  const struct drm_connector_state *conn_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct dp_sdp infoframe_sdp = {};
	struct hdmi_drm_infoframe drm_infoframe = {};
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;
	int ret;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
	if (ret) {
		DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
		return;
	}

	len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Packet ID, 00h for non-Audio INFOFRAME */
	infoframe_sdp.sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87,
	 */
	infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1,
	 */
	infoframe_sdp.sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	infoframe_sdp.db[0] = drm_infoframe.version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	infoframe_sdp.db[1] = drm_infoframe.length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * The DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11 the GMP register size is identical to the DP HDR
	 * static metadata infoframe size. GEN11+ uses a larger register, and
	 * write_infoframe will pad the rest of it.
	 */
	intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
					HDMI_PACKET_TYPE_GAMUT_METADATA,
					&infoframe_sdp,
					sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
}

void intel_dp_vsc_enable(struct intel_dp *intel_dp,
			 const struct intel_crtc_state *crtc_state,
			 const struct drm_connector_state *conn_state)
{
	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
}

void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	if (!conn_state->hdr_output_metadata)
		return;

	intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
						  crtc_state,
						  conn_state);
}

static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

intel_dp->compliance.test_data.video_pattern = test_pattern; 4952 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 4953 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 4954 /* Set test active flag here so userspace doesn't interrupt things */ 4955 intel_dp->compliance.test_active = true; 4956 4957 return DP_TEST_ACK; 4958 } 4959 4960 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 4961 { 4962 u8 test_result = DP_TEST_ACK; 4963 struct intel_connector *intel_connector = intel_dp->attached_connector; 4964 struct drm_connector *connector = &intel_connector->base; 4965 4966 if (intel_connector->detect_edid == NULL || 4967 connector->edid_corrupt || 4968 intel_dp->aux.i2c_defer_count > 6) { 4969 /* Check EDID read for NACKs, DEFERs and corruption 4970 * (DP CTS 1.2 Core r1.1) 4971 * 4.2.2.4 : Failed EDID read, I2C_NAK 4972 * 4.2.2.5 : Failed EDID read, I2C_DEFER 4973 * 4.2.2.6 : EDID corruption detected 4974 * Use failsafe mode for all cases 4975 */ 4976 if (intel_dp->aux.i2c_nack_count > 0 || 4977 intel_dp->aux.i2c_defer_count > 0) 4978 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", 4979 intel_dp->aux.i2c_nack_count, 4980 intel_dp->aux.i2c_defer_count); 4981 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 4982 } else { 4983 struct edid *block = intel_connector->detect_edid; 4984 4985 /* We have to write the checksum 4986 * of the last block read 4987 */ 4988 block += intel_connector->detect_edid->extensions; 4989 4990 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 4991 block->checksum) <= 0) 4992 DRM_DEBUG_KMS("Failed to write EDID checksum\n"); 4993 4994 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 4995 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 4996 } 4997 4998 /* Set test active flag here so userspace doesn't interrupt things */ 4999 intel_dp->compliance.test_active = true; 5000 5001 return test_result; 5002 } 5003 5004 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5005 { 5006 u8 test_result = DP_TEST_NAK; 5007 return test_result; 5008 } 5009 5010 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5011 { 5012 u8 response = DP_TEST_NAK; 5013 u8 request = 0; 5014 int status; 5015 5016 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5017 if (status <= 0) { 5018 DRM_DEBUG_KMS("Could not read test request from sink\n"); 5019 goto update_status; 5020 } 5021 5022 switch (request) { 5023 case DP_TEST_LINK_TRAINING: 5024 DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); 5025 response = intel_dp_autotest_link_training(intel_dp); 5026 break; 5027 case DP_TEST_LINK_VIDEO_PATTERN: 5028 DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); 5029 response = intel_dp_autotest_video_pattern(intel_dp); 5030 break; 5031 case DP_TEST_LINK_EDID_READ: 5032 DRM_DEBUG_KMS("EDID test requested\n"); 5033 response = intel_dp_autotest_edid(intel_dp); 5034 break; 5035 case DP_TEST_LINK_PHY_TEST_PATTERN: 5036 DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); 5037 response = intel_dp_autotest_phy_pattern(intel_dp); 5038 break; 5039 default: 5040 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); 5041 break; 5042 } 5043 5044 if (response & DP_TEST_ACK) 5045 intel_dp->compliance.test_type = request; 5046 5047 update_status: 5048 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5049 if (status <= 0) 5050 DRM_DEBUG_KMS("Could not write test response to sink\n"); 5051 } 5052 5053 static int 5054 intel_dp_check_mst_status(struct 
intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}

static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it controls the main link on its
	 * own, sending frames and enabling/disabling the link as needed.
	 * Attempting a retrain now would fail, since the link may not be on,
	 * or training patterns could get mixed with frame data. Also, when
	 * exiting PSR, the HW retrains the link anyway, fixing any link
	 * status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->hw.active)
		return 0;

	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}

/*
 * If the display is now connected, check the link status; there have been
 * known issues of link loss triggering a long pulse.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to the intermittent HPD dip.
5220 */ 5221 static enum intel_hotplug_state 5222 intel_dp_hotplug(struct intel_encoder *encoder, 5223 struct intel_connector *connector, 5224 bool irq_received) 5225 { 5226 struct drm_modeset_acquire_ctx ctx; 5227 enum intel_hotplug_state state; 5228 int ret; 5229 5230 state = intel_encoder_hotplug(encoder, connector, irq_received); 5231 5232 drm_modeset_acquire_init(&ctx, 0); 5233 5234 for (;;) { 5235 ret = intel_dp_retrain_link(encoder, &ctx); 5236 5237 if (ret == -EDEADLK) { 5238 drm_modeset_backoff(&ctx); 5239 continue; 5240 } 5241 5242 break; 5243 } 5244 5245 drm_modeset_drop_locks(&ctx); 5246 drm_modeset_acquire_fini(&ctx); 5247 drm_WARN(encoder->base.dev, ret, 5248 "Acquiring modeset locks failed with %i\n", ret); 5249 5250 /* 5251 * Keeping it consistent with intel_ddi_hotplug() and 5252 * intel_hdmi_hotplug(). 5253 */ 5254 if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) 5255 state = INTEL_HOTPLUG_RETRY; 5256 5257 return state; 5258 } 5259 5260 static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 5261 { 5262 u8 val; 5263 5264 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5265 return; 5266 5267 if (drm_dp_dpcd_readb(&intel_dp->aux, 5268 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5269 return; 5270 5271 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5272 5273 if (val & DP_AUTOMATED_TEST_REQUEST) 5274 intel_dp_handle_test_request(intel_dp); 5275 5276 if (val & DP_CP_IRQ) 5277 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5278 5279 if (val & DP_SINK_SPECIFIC_IRQ) 5280 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); 5281 } 5282 5283 /* 5284 * According to DP spec 5285 * 5.1.2: 5286 * 1. Read DPCD 5287 * 2. Configure link according to Receiver Capabilities 5288 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5289 * 4. Check link status on receipt of hot-plug interrupt 5290 * 5291 * intel_dp_short_pulse - handles short pulse interrupts 5292 * when full detection is not required. 5293 * Returns %true if short pulse is handled and full detection 5294 * is NOT required and %false otherwise. 5295 */ 5296 static bool 5297 intel_dp_short_pulse(struct intel_dp *intel_dp) 5298 { 5299 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5300 u8 old_sink_count = intel_dp->sink_count; 5301 bool ret; 5302 5303 /* 5304 * Clearing compliance test variables to allow capturing 5305 * of values for next automated test request. 
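 * They are filled in again by intel_dp_handle_test_request() when the sink
 * raises DP_AUTOMATED_TEST_REQUEST.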
5306 */ 5307 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5308 5309 /* 5310 * Now read the DPCD to see if it's actually running 5311 * If the current value of sink count doesn't match with 5312 * the value that was stored earlier or dpcd read failed 5313 * we need to do full detection 5314 */ 5315 ret = intel_dp_get_dpcd(intel_dp); 5316 5317 if ((old_sink_count != intel_dp->sink_count) || !ret) { 5318 /* No need to proceed if we are going to do full detect */ 5319 return false; 5320 } 5321 5322 intel_dp_check_service_irq(intel_dp); 5323 5324 /* Handle CEC interrupts, if any */ 5325 drm_dp_cec_irq(&intel_dp->aux); 5326 5327 /* defer to the hotplug work for link retraining if needed */ 5328 if (intel_dp_needs_link_retrain(intel_dp)) 5329 return false; 5330 5331 intel_psr_short_pulse(intel_dp); 5332 5333 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 5334 drm_dbg_kms(&dev_priv->drm, 5335 "Link Training Compliance Test requested\n"); 5336 /* Send a Hotplug Uevent to userspace to start modeset */ 5337 drm_kms_helper_hotplug_event(&dev_priv->drm); 5338 } 5339 5340 return true; 5341 } 5342 5343 /* XXX this is probably wrong for multiple downstream ports */ 5344 static enum drm_connector_status 5345 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 5346 { 5347 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 5348 u8 *dpcd = intel_dp->dpcd; 5349 u8 type; 5350 5351 if (WARN_ON(intel_dp_is_edp(intel_dp))) 5352 return connector_status_connected; 5353 5354 if (lspcon->active) 5355 lspcon_resume(lspcon); 5356 5357 if (!intel_dp_get_dpcd(intel_dp)) 5358 return connector_status_disconnected; 5359 5360 /* if there's no downstream port, we're done */ 5361 if (!drm_dp_is_branch(dpcd)) 5362 return connector_status_connected; 5363 5364 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 5365 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 5366 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 5367 5368 return intel_dp->sink_count ? 
5369 connector_status_connected : connector_status_disconnected; 5370 } 5371 5372 if (intel_dp_can_mst(intel_dp)) 5373 return connector_status_connected; 5374 5375 /* If no HPD, poke DDC gently */ 5376 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5377 return connector_status_connected; 5378 5379 /* Well we tried, say unknown for unreliable port types */ 5380 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5381 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5382 if (type == DP_DS_PORT_TYPE_VGA || 5383 type == DP_DS_PORT_TYPE_NON_EDID) 5384 return connector_status_unknown; 5385 } else { 5386 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 5387 DP_DWN_STRM_PORT_TYPE_MASK; 5388 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 5389 type == DP_DWN_STRM_PORT_TYPE_OTHER) 5390 return connector_status_unknown; 5391 } 5392 5393 /* Anything else is out of spec, warn and ignore */ 5394 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 5395 return connector_status_disconnected; 5396 } 5397 5398 static enum drm_connector_status 5399 edp_detect(struct intel_dp *intel_dp) 5400 { 5401 return connector_status_connected; 5402 } 5403 5404 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 5405 { 5406 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5407 u32 bit; 5408 5409 switch (encoder->hpd_pin) { 5410 case HPD_PORT_B: 5411 bit = SDE_PORTB_HOTPLUG; 5412 break; 5413 case HPD_PORT_C: 5414 bit = SDE_PORTC_HOTPLUG; 5415 break; 5416 case HPD_PORT_D: 5417 bit = SDE_PORTD_HOTPLUG; 5418 break; 5419 default: 5420 MISSING_CASE(encoder->hpd_pin); 5421 return false; 5422 } 5423 5424 return intel_de_read(dev_priv, SDEISR) & bit; 5425 } 5426 5427 static bool cpt_digital_port_connected(struct intel_encoder *encoder) 5428 { 5429 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5430 u32 bit; 5431 5432 switch (encoder->hpd_pin) { 5433 case HPD_PORT_B: 5434 bit = SDE_PORTB_HOTPLUG_CPT; 5435 break; 5436 case HPD_PORT_C: 5437 bit = SDE_PORTC_HOTPLUG_CPT; 5438 break; 5439 case HPD_PORT_D: 5440 bit = SDE_PORTD_HOTPLUG_CPT; 5441 break; 5442 default: 5443 MISSING_CASE(encoder->hpd_pin); 5444 return false; 5445 } 5446 5447 return intel_de_read(dev_priv, SDEISR) & bit; 5448 } 5449 5450 static bool spt_digital_port_connected(struct intel_encoder *encoder) 5451 { 5452 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5453 u32 bit; 5454 5455 switch (encoder->hpd_pin) { 5456 case HPD_PORT_A: 5457 bit = SDE_PORTA_HOTPLUG_SPT; 5458 break; 5459 case HPD_PORT_E: 5460 bit = SDE_PORTE_HOTPLUG_SPT; 5461 break; 5462 default: 5463 return cpt_digital_port_connected(encoder); 5464 } 5465 5466 return intel_de_read(dev_priv, SDEISR) & bit; 5467 } 5468 5469 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 5470 { 5471 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5472 u32 bit; 5473 5474 switch (encoder->hpd_pin) { 5475 case HPD_PORT_B: 5476 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 5477 break; 5478 case HPD_PORT_C: 5479 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 5480 break; 5481 case HPD_PORT_D: 5482 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 5483 break; 5484 default: 5485 MISSING_CASE(encoder->hpd_pin); 5486 return false; 5487 } 5488 5489 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 5490 } 5491 5492 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 5493 { 5494 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5495 u32 bit; 5496 5497 switch (encoder->hpd_pin) { 5498 case HPD_PORT_B: 5499 bit = 
PORTB_HOTPLUG_LIVE_STATUS_GM45; 5500 break; 5501 case HPD_PORT_C: 5502 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 5503 break; 5504 case HPD_PORT_D: 5505 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 5506 break; 5507 default: 5508 MISSING_CASE(encoder->hpd_pin); 5509 return false; 5510 } 5511 5512 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 5513 } 5514 5515 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 5516 { 5517 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5518 5519 if (encoder->hpd_pin == HPD_PORT_A) 5520 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG; 5521 else 5522 return ibx_digital_port_connected(encoder); 5523 } 5524 5525 static bool snb_digital_port_connected(struct intel_encoder *encoder) 5526 { 5527 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5528 5529 if (encoder->hpd_pin == HPD_PORT_A) 5530 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG; 5531 else 5532 return cpt_digital_port_connected(encoder); 5533 } 5534 5535 static bool ivb_digital_port_connected(struct intel_encoder *encoder) 5536 { 5537 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5538 5539 if (encoder->hpd_pin == HPD_PORT_A) 5540 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB; 5541 else 5542 return cpt_digital_port_connected(encoder); 5543 } 5544 5545 static bool bdw_digital_port_connected(struct intel_encoder *encoder) 5546 { 5547 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5548 5549 if (encoder->hpd_pin == HPD_PORT_A) 5550 return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; 5551 else 5552 return cpt_digital_port_connected(encoder); 5553 } 5554 5555 static bool bxt_digital_port_connected(struct intel_encoder *encoder) 5556 { 5557 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5558 u32 bit; 5559 5560 switch (encoder->hpd_pin) { 5561 case HPD_PORT_A: 5562 bit = BXT_DE_PORT_HP_DDIA; 5563 break; 5564 case HPD_PORT_B: 5565 bit = BXT_DE_PORT_HP_DDIB; 5566 break; 5567 case HPD_PORT_C: 5568 bit = BXT_DE_PORT_HP_DDIC; 5569 break; 5570 default: 5571 MISSING_CASE(encoder->hpd_pin); 5572 return false; 5573 } 5574 5575 return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit; 5576 } 5577 5578 static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv, 5579 enum phy phy) 5580 { 5581 if (HAS_PCH_MCC(dev_priv) && phy == PHY_C) 5582 return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1); 5583 5584 return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy); 5585 } 5586 5587 static bool icp_digital_port_connected(struct intel_encoder *encoder) 5588 { 5589 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5590 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5591 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 5592 5593 if (intel_phy_is_combo(dev_priv, phy)) 5594 return intel_combo_phy_connected(dev_priv, phy); 5595 else if (intel_phy_is_tc(dev_priv, phy)) 5596 return intel_tc_port_connected(dig_port); 5597 else 5598 MISSING_CASE(encoder->hpd_pin); 5599 5600 return false; 5601 } 5602 5603 /* 5604 * intel_digital_port_connected - is the specified port connected? 5605 * @encoder: intel_encoder 5606 * 5607 * In cases where there's a connector physically connected but it can't be used 5608 * by our hardware we also return false, since the rest of the driver should 5609 * pretty much treat the port as disconnected. This is relevant for type-C 5610 * (starting on ICL) where there's ownership involved. 
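 * For such ports icp_digital_port_connected() defers to
 * intel_tc_port_connected(), which makes that call based on the TC PHY's
 * current state.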
5611 * 5612 * Return %true if port is connected, %false otherwise. 5613 */ 5614 static bool __intel_digital_port_connected(struct intel_encoder *encoder) 5615 { 5616 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5617 5618 if (HAS_GMCH(dev_priv)) { 5619 if (IS_GM45(dev_priv)) 5620 return gm45_digital_port_connected(encoder); 5621 else 5622 return g4x_digital_port_connected(encoder); 5623 } 5624 5625 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 5626 return icp_digital_port_connected(encoder); 5627 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 5628 return spt_digital_port_connected(encoder); 5629 else if (IS_GEN9_LP(dev_priv)) 5630 return bxt_digital_port_connected(encoder); 5631 else if (IS_GEN(dev_priv, 8)) 5632 return bdw_digital_port_connected(encoder); 5633 else if (IS_GEN(dev_priv, 7)) 5634 return ivb_digital_port_connected(encoder); 5635 else if (IS_GEN(dev_priv, 6)) 5636 return snb_digital_port_connected(encoder); 5637 else if (IS_GEN(dev_priv, 5)) 5638 return ilk_digital_port_connected(encoder); 5639 5640 MISSING_CASE(INTEL_GEN(dev_priv)); 5641 return false; 5642 } 5643 5644 bool intel_digital_port_connected(struct intel_encoder *encoder) 5645 { 5646 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5647 bool is_connected = false; 5648 intel_wakeref_t wakeref; 5649 5650 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 5651 is_connected = __intel_digital_port_connected(encoder); 5652 5653 return is_connected; 5654 } 5655 5656 static struct edid * 5657 intel_dp_get_edid(struct intel_dp *intel_dp) 5658 { 5659 struct intel_connector *intel_connector = intel_dp->attached_connector; 5660 5661 /* use cached edid if we have one */ 5662 if (intel_connector->edid) { 5663 /* invalid edid */ 5664 if (IS_ERR(intel_connector->edid)) 5665 return NULL; 5666 5667 return drm_edid_duplicate(intel_connector->edid); 5668 } else 5669 return drm_get_edid(&intel_connector->base, 5670 &intel_dp->aux.ddc); 5671 } 5672 5673 static void 5674 intel_dp_set_edid(struct intel_dp *intel_dp) 5675 { 5676 struct intel_connector *intel_connector = intel_dp->attached_connector; 5677 struct edid *edid; 5678 5679 intel_dp_unset_edid(intel_dp); 5680 edid = intel_dp_get_edid(intel_dp); 5681 intel_connector->detect_edid = edid; 5682 5683 intel_dp->has_audio = drm_detect_monitor_audio(edid); 5684 drm_dp_cec_set_edid(&intel_dp->aux, edid); 5685 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 5686 } 5687 5688 static void 5689 intel_dp_unset_edid(struct intel_dp *intel_dp) 5690 { 5691 struct intel_connector *intel_connector = intel_dp->attached_connector; 5692 5693 drm_dp_cec_unset_edid(&intel_dp->aux); 5694 kfree(intel_connector->detect_edid); 5695 intel_connector->detect_edid = NULL; 5696 5697 intel_dp->has_audio = false; 5698 intel_dp->edid_quirks = 0; 5699 } 5700 5701 static int 5702 intel_dp_detect(struct drm_connector *connector, 5703 struct drm_modeset_acquire_ctx *ctx, 5704 bool force) 5705 { 5706 struct drm_i915_private *dev_priv = to_i915(connector->dev); 5707 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5708 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5709 struct intel_encoder *encoder = &dig_port->base; 5710 enum drm_connector_status status; 5711 5712 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 5713 connector->base.id, connector->name); 5714 drm_WARN_ON(&dev_priv->drm, 5715 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 5716 5717 /* Can't disconnect eDP */ 5718 if 
(intel_dp_is_edp(intel_dp)) 5719 status = edp_detect(intel_dp); 5720 else if (intel_digital_port_connected(encoder)) 5721 status = intel_dp_detect_dpcd(intel_dp); 5722 else 5723 status = connector_status_disconnected; 5724 5725 if (status == connector_status_disconnected) { 5726 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5727 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 5728 5729 if (intel_dp->is_mst) { 5730 drm_dbg_kms(&dev_priv->drm, 5731 "MST device may have disappeared %d vs %d\n", 5732 intel_dp->is_mst, 5733 intel_dp->mst_mgr.mst_state); 5734 intel_dp->is_mst = false; 5735 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5736 intel_dp->is_mst); 5737 } 5738 5739 goto out; 5740 } 5741 5742 if (intel_dp->reset_link_params) { 5743 /* Initial max link lane count */ 5744 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 5745 5746 /* Initial max link rate */ 5747 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 5748 5749 intel_dp->reset_link_params = false; 5750 } 5751 5752 intel_dp_print_rates(intel_dp); 5753 5754 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 5755 if (INTEL_GEN(dev_priv) >= 11) 5756 intel_dp_get_dsc_sink_cap(intel_dp); 5757 5758 intel_dp_configure_mst(intel_dp); 5759 5760 if (intel_dp->is_mst) { 5761 /* 5762 * If we are in MST mode then this connector 5763 * won't appear connected or have anything 5764 * with EDID on it 5765 */ 5766 status = connector_status_disconnected; 5767 goto out; 5768 } 5769 5770 /* 5771 * Some external monitors do not signal loss of link synchronization 5772 * with an IRQ_HPD, so force a link status check. 5773 */ 5774 if (!intel_dp_is_edp(intel_dp)) { 5775 int ret; 5776 5777 ret = intel_dp_retrain_link(encoder, ctx); 5778 if (ret) 5779 return ret; 5780 } 5781 5782 /* 5783 * Clearing NACK and defer counts to get their exact values 5784 * while reading EDID which are required by Compliance tests 5785 * 4.2.2.4 and 4.2.2.5 5786 */ 5787 intel_dp->aux.i2c_nack_count = 0; 5788 intel_dp->aux.i2c_defer_count = 0; 5789 5790 intel_dp_set_edid(intel_dp); 5791 if (intel_dp_is_edp(intel_dp) || 5792 to_intel_connector(connector)->detect_edid) 5793 status = connector_status_connected; 5794 5795 intel_dp_check_service_irq(intel_dp); 5796 5797 out: 5798 if (status != connector_status_connected && !intel_dp->is_mst) 5799 intel_dp_unset_edid(intel_dp); 5800 5801 /* 5802 * Make sure the refs for power wells enabled during detect are 5803 * dropped to avoid a new detect cycle triggered by HPD polling. 
5804 */ 5805 intel_display_power_flush_work(dev_priv); 5806 5807 return status; 5808 } 5809 5810 static void 5811 intel_dp_force(struct drm_connector *connector) 5812 { 5813 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5814 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5815 struct intel_encoder *intel_encoder = &dig_port->base; 5816 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 5817 enum intel_display_power_domain aux_domain = 5818 intel_aux_power_domain(dig_port); 5819 intel_wakeref_t wakeref; 5820 5821 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 5822 connector->base.id, connector->name); 5823 intel_dp_unset_edid(intel_dp); 5824 5825 if (connector->status != connector_status_connected) 5826 return; 5827 5828 wakeref = intel_display_power_get(dev_priv, aux_domain); 5829 5830 intel_dp_set_edid(intel_dp); 5831 5832 intel_display_power_put(dev_priv, aux_domain, wakeref); 5833 } 5834 5835 static int intel_dp_get_modes(struct drm_connector *connector) 5836 { 5837 struct intel_connector *intel_connector = to_intel_connector(connector); 5838 struct edid *edid; 5839 5840 edid = intel_connector->detect_edid; 5841 if (edid) { 5842 int ret = intel_connector_update_modes(connector, edid); 5843 if (ret) 5844 return ret; 5845 } 5846 5847 /* if eDP has no EDID, fall back to fixed mode */ 5848 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 5849 intel_connector->panel.fixed_mode) { 5850 struct drm_display_mode *mode; 5851 5852 mode = drm_mode_duplicate(connector->dev, 5853 intel_connector->panel.fixed_mode); 5854 if (mode) { 5855 drm_mode_probed_add(connector, mode); 5856 return 1; 5857 } 5858 } 5859 5860 return 0; 5861 } 5862 5863 static int 5864 intel_dp_connector_register(struct drm_connector *connector) 5865 { 5866 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5867 int ret; 5868 5869 ret = intel_connector_register(connector); 5870 if (ret) 5871 return ret; 5872 5873 intel_connector_debugfs_add(connector); 5874 5875 DRM_DEBUG_KMS("registering %s bus for %s\n", 5876 intel_dp->aux.name, connector->kdev->kobj.name); 5877 5878 intel_dp->aux.dev = connector->kdev; 5879 ret = drm_dp_aux_register(&intel_dp->aux); 5880 if (!ret) 5881 drm_dp_cec_register_connector(&intel_dp->aux, connector); 5882 return ret; 5883 } 5884 5885 static void 5886 intel_dp_connector_unregister(struct drm_connector *connector) 5887 { 5888 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 5889 5890 drm_dp_cec_unregister_connector(&intel_dp->aux); 5891 drm_dp_aux_unregister(&intel_dp->aux); 5892 intel_connector_unregister(connector); 5893 } 5894 5895 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 5896 { 5897 struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 5898 struct intel_dp *intel_dp = &intel_dig_port->dp; 5899 5900 intel_dp_mst_encoder_cleanup(intel_dig_port); 5901 if (intel_dp_is_edp(intel_dp)) { 5902 intel_wakeref_t wakeref; 5903 5904 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5905 /* 5906 * vdd might still be enabled due to the delayed vdd off. 5907 * Make sure vdd is actually turned off here.
5908 */ 5909 with_pps_lock(intel_dp, wakeref) 5910 edp_panel_vdd_off_sync(intel_dp); 5911 5912 if (intel_dp->edp_notifier.notifier_call) { 5913 unregister_reboot_notifier(&intel_dp->edp_notifier); 5914 intel_dp->edp_notifier.notifier_call = NULL; 5915 } 5916 } 5917 5918 intel_dp_aux_fini(intel_dp); 5919 } 5920 5921 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 5922 { 5923 intel_dp_encoder_flush_work(encoder); 5924 5925 drm_encoder_cleanup(encoder); 5926 kfree(enc_to_dig_port(to_intel_encoder(encoder))); 5927 } 5928 5929 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 5930 { 5931 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 5932 intel_wakeref_t wakeref; 5933 5934 if (!intel_dp_is_edp(intel_dp)) 5935 return; 5936 5937 /* 5938 * vdd might still be enabled due to the delayed vdd off. 5939 * Make sure vdd is actually turned off here. 5940 */ 5941 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5942 with_pps_lock(intel_dp, wakeref) 5943 edp_panel_vdd_off_sync(intel_dp); 5944 } 5945 5946 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) 5947 { 5948 long ret; 5949 5950 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) 5951 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, 5952 msecs_to_jiffies(timeout)); 5953 5954 if (!ret) 5955 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n"); 5956 } 5957 5958 static 5959 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, 5960 u8 *an) 5961 { 5962 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); 5963 static const struct drm_dp_aux_msg msg = { 5964 .request = DP_AUX_NATIVE_WRITE, 5965 .address = DP_AUX_HDCP_AKSV, 5966 .size = DRM_HDCP_KSV_LEN, 5967 }; 5968 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; 5969 ssize_t dpcd_ret; 5970 int ret; 5971 5972 /* Output An first, that's easy */ 5973 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, 5974 an, DRM_HDCP_AN_LEN); 5975 if (dpcd_ret != DRM_HDCP_AN_LEN) { 5976 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n", 5977 dpcd_ret); 5978 return dpcd_ret >= 0 ? -EIO : dpcd_ret; 5979 } 5980 5981 /* 5982 * Since Aksv is Oh-So-Secret, we can't access it in software. So in 5983 * order to get it on the wire, we need to create the AUX header as if 5984 * we were writing the data, and then tickle the hardware to output the 5985 * data once the header is sent out. 5986 */ 5987 intel_dp_aux_header(txbuf, &msg); 5988 5989 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 5990 rxbuf, sizeof(rxbuf), 5991 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 5992 if (ret < 0) { 5993 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret); 5994 return ret; 5995 } else if (ret == 0) { 5996 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n"); 5997 return -EIO; 5998 } 5999 6000 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 6001 if (reply != DP_AUX_NATIVE_REPLY_ACK) { 6002 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", 6003 reply); 6004 return -EIO; 6005 } 6006 return 0; 6007 } 6008 6009 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, 6010 u8 *bksv) 6011 { 6012 ssize_t ret; 6013 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 6014 DRM_HDCP_KSV_LEN); 6015 if (ret != DRM_HDCP_KSV_LEN) { 6016 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret); 6017 return ret >= 0 ?
-EIO : ret; 6018 } 6019 return 0; 6020 } 6021 6022 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, 6023 u8 *bstatus) 6024 { 6025 ssize_t ret; 6026 /* 6027 * For some reason the HDMI and DP HDCP specs call this register 6028 * definition by different names. In the HDMI spec, it's called BSTATUS, 6029 * but in DP it's called BINFO. 6030 */ 6031 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, 6032 bstatus, DRM_HDCP_BSTATUS_LEN); 6033 if (ret != DRM_HDCP_BSTATUS_LEN) { 6034 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6035 return ret >= 0 ? -EIO : ret; 6036 } 6037 return 0; 6038 } 6039 6040 static 6041 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, 6042 u8 *bcaps) 6043 { 6044 ssize_t ret; 6045 6046 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6047 bcaps, 1); 6048 if (ret != 1) { 6049 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret); 6050 return ret >= 0 ? -EIO : ret; 6051 } 6052 6053 return 0; 6054 } 6055 6056 static 6057 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, 6058 bool *repeater_present) 6059 { 6060 ssize_t ret; 6061 u8 bcaps; 6062 6063 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6064 if (ret) 6065 return ret; 6066 6067 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6068 return 0; 6069 } 6070 6071 static 6072 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, 6073 u8 *ri_prime) 6074 { 6075 ssize_t ret; 6076 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6077 ri_prime, DRM_HDCP_RI_LEN); 6078 if (ret != DRM_HDCP_RI_LEN) { 6079 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret); 6080 return ret >= 0 ? -EIO : ret; 6081 } 6082 return 0; 6083 } 6084 6085 static 6086 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, 6087 bool *ksv_ready) 6088 { 6089 ssize_t ret; 6090 u8 bstatus; 6091 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6092 &bstatus, 1); 6093 if (ret != 1) { 6094 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6095 return ret >= 0 ? -EIO : ret; 6096 } 6097 *ksv_ready = bstatus & DP_BSTATUS_READY; 6098 return 0; 6099 } 6100 6101 static 6102 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, 6103 int num_downstream, u8 *ksv_fifo) 6104 { 6105 ssize_t ret; 6106 int i; 6107 6108 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6109 for (i = 0; i < num_downstream; i += 3) { 6110 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6111 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6112 DP_AUX_HDCP_KSV_FIFO, 6113 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6114 len); 6115 if (ret != len) { 6116 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n", 6117 i, ret); 6118 return ret >= 0 ? -EIO : ret; 6119 } 6120 } 6121 return 0; 6122 } 6123 6124 static 6125 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, 6126 int i, u32 *part) 6127 { 6128 ssize_t ret; 6129 6130 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6131 return -EINVAL; 6132 6133 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6134 DP_AUX_HDCP_V_PRIME(i), part, 6135 DRM_HDCP_V_PRIME_PART_LEN); 6136 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6137 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6138 return ret >= 0 ? 
-EIO : ret; 6139 } 6140 return 0; 6141 } 6142 6143 static 6144 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, 6145 bool enable) 6146 { 6147 /* Not used for single stream DisplayPort setups */ 6148 return 0; 6149 } 6150 6151 static 6152 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) 6153 { 6154 ssize_t ret; 6155 u8 bstatus; 6156 6157 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6158 &bstatus, 1); 6159 if (ret != 1) { 6160 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6161 return false; 6162 } 6163 6164 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6165 } 6166 6167 static 6168 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, 6169 bool *hdcp_capable) 6170 { 6171 ssize_t ret; 6172 u8 bcaps; 6173 6174 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6175 if (ret) 6176 return ret; 6177 6178 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6179 return 0; 6180 } 6181 6182 struct hdcp2_dp_errata_stream_type { 6183 u8 msg_id; 6184 u8 stream_type; 6185 } __packed; 6186 6187 struct hdcp2_dp_msg_data { 6188 u8 msg_id; 6189 u32 offset; 6190 bool msg_detectable; 6191 u32 timeout; 6192 u32 timeout2; /* Added for non_paired situation */ 6193 }; 6194 6195 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6196 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6197 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6198 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6199 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6200 false, 0, 0 }, 6201 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6202 false, 0, 0 }, 6203 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6204 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6205 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6206 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6207 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6208 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6209 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6210 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6211 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6212 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6213 0, 0 }, 6214 { HDCP_2_2_REP_SEND_RECVID_LIST, 6215 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6216 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6217 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6218 0, 0 }, 6219 { HDCP_2_2_REP_STREAM_MANAGE, 6220 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6221 0, 0 }, 6222 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6223 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6224 /* local define to shovel this through the write_2_2 interface */ 6225 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6226 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6227 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6228 0, 0 }, 6229 }; 6230 6231 static inline 6232 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, 6233 u8 *rx_status) 6234 { 6235 ssize_t ret; 6236 6237 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6238 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6239 HDCP_2_2_DP_RXSTATUS_LEN); 6240 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6241 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); 6242 return ret >= 0 ? 
-EIO : ret; 6243 } 6244 6245 return 0; 6246 } 6247 6248 static 6249 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, 6250 u8 msg_id, bool *msg_ready) 6251 { 6252 u8 rx_status; 6253 int ret; 6254 6255 *msg_ready = false; 6256 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6257 if (ret < 0) 6258 return ret; 6259 6260 switch (msg_id) { 6261 case HDCP_2_2_AKE_SEND_HPRIME: 6262 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) 6263 *msg_ready = true; 6264 break; 6265 case HDCP_2_2_AKE_SEND_PAIRING_INFO: 6266 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) 6267 *msg_ready = true; 6268 break; 6269 case HDCP_2_2_REP_SEND_RECVID_LIST: 6270 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6271 *msg_ready = true; 6272 break; 6273 default: 6274 DRM_ERROR("Unidentified msg_id: %d\n", msg_id); 6275 return -EINVAL; 6276 } 6277 6278 return 0; 6279 } 6280 6281 static ssize_t 6282 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, 6283 const struct hdcp2_dp_msg_data *hdcp2_msg_data) 6284 { 6285 struct intel_dp *dp = &intel_dig_port->dp; 6286 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6287 u8 msg_id = hdcp2_msg_data->msg_id; 6288 int ret, timeout; 6289 bool msg_ready = false; 6290 6291 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) 6292 timeout = hdcp2_msg_data->timeout2; 6293 else 6294 timeout = hdcp2_msg_data->timeout; 6295 6296 /* 6297 * There is no way to detect the CERT, LPRIME and STREAM_READY 6298 * availability. So Wait for timeout and read the msg. 6299 */ 6300 if (!hdcp2_msg_data->msg_detectable) { 6301 mdelay(timeout); 6302 ret = 0; 6303 } else { 6304 /* 6305 * As we want to check the msg availability at timeout, Ignoring 6306 * the timeout at wait for CP_IRQ. 6307 */ 6308 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); 6309 ret = hdcp2_detect_msg_availability(intel_dig_port, 6310 msg_id, &msg_ready); 6311 if (!msg_ready) 6312 ret = -ETIMEDOUT; 6313 } 6314 6315 if (ret) 6316 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n", 6317 hdcp2_msg_data->msg_id, ret, timeout); 6318 6319 return ret; 6320 } 6321 6322 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) 6323 { 6324 int i; 6325 6326 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) 6327 if (hdcp2_dp_msg_data[i].msg_id == msg_id) 6328 return &hdcp2_dp_msg_data[i]; 6329 6330 return NULL; 6331 } 6332 6333 static 6334 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, 6335 void *buf, size_t size) 6336 { 6337 struct intel_dp *dp = &intel_dig_port->dp; 6338 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6339 unsigned int offset; 6340 u8 *byte = buf; 6341 ssize_t ret, bytes_to_write, len; 6342 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6343 6344 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); 6345 if (!hdcp2_msg_data) 6346 return -EINVAL; 6347 6348 offset = hdcp2_msg_data->offset; 6349 6350 /* No msg_id in DP HDCP2.2 msgs */ 6351 bytes_to_write = size - 1; 6352 byte++; 6353 6354 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); 6355 6356 while (bytes_to_write) { 6357 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? 
6358 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; 6359 6360 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, 6361 offset, (void *)byte, len); 6362 if (ret < 0) 6363 return ret; 6364 6365 bytes_to_write -= ret; 6366 byte += ret; 6367 offset += ret; 6368 } 6369 6370 return size; 6371 } 6372 6373 static 6374 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) 6375 { 6376 u8 rx_info[HDCP_2_2_RXINFO_LEN]; 6377 u32 dev_cnt; 6378 ssize_t ret; 6379 6380 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6381 DP_HDCP_2_2_REG_RXINFO_OFFSET, 6382 (void *)rx_info, HDCP_2_2_RXINFO_LEN); 6383 if (ret != HDCP_2_2_RXINFO_LEN) 6384 return ret >= 0 ? -EIO : ret; 6385 6386 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 6387 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 6388 6389 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) 6390 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; 6391 6392 ret = sizeof(struct hdcp2_rep_send_receiverid_list) - 6393 HDCP_2_2_RECEIVER_IDS_MAX_LEN + 6394 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); 6395 6396 return ret; 6397 } 6398 6399 static 6400 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, 6401 u8 msg_id, void *buf, size_t size) 6402 { 6403 unsigned int offset; 6404 u8 *byte = buf; 6405 ssize_t ret, bytes_to_recv, len; 6406 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6407 6408 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); 6409 if (!hdcp2_msg_data) 6410 return -EINVAL; 6411 offset = hdcp2_msg_data->offset; 6412 6413 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); 6414 if (ret < 0) 6415 return ret; 6416 6417 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { 6418 ret = get_receiver_id_list_size(intel_dig_port); 6419 if (ret < 0) 6420 return ret; 6421 6422 size = ret; 6423 } 6424 bytes_to_recv = size - 1; 6425 6426 /* DP adaptation msgs have no msg_id */ 6427 byte++; 6428 6429 while (bytes_to_recv) { 6430 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 6431 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; 6432 6433 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, 6434 (void *)byte, len); 6435 if (ret < 0) { 6436 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret); 6437 return ret; 6438 } 6439 6440 bytes_to_recv -= ret; 6441 byte += ret; 6442 offset += ret; 6443 } 6444 byte = buf; 6445 *byte = msg_id; 6446 6447 return size; 6448 } 6449 6450 static 6451 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, 6452 bool is_repeater, u8 content_type) 6453 { 6454 int ret; 6455 struct hdcp2_dp_errata_stream_type stream_type_msg; 6456 6457 if (is_repeater) 6458 return 0; 6459 6460 /* 6461 * Errata for DP: as the Stream type is used for encryption, the 6462 * Receiver needs to be told the Stream type so that it can decrypt 6463 * the content. 6464 * A Repeater is given the Stream type later, as part of its 6465 * authentication. 6466 */ 6467 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; 6468 stream_type_msg.stream_type = content_type; 6469 6470 ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, 6471 sizeof(stream_type_msg)); 6472 6473 return ret < 0 ?
ret : 0; 6474 6475 } 6476 6477 static 6478 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) 6479 { 6480 u8 rx_status; 6481 int ret; 6482 6483 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6484 if (ret) 6485 return ret; 6486 6487 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) 6488 ret = HDCP_REAUTH_REQUEST; 6489 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) 6490 ret = HDCP_LINK_INTEGRITY_FAILURE; 6491 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6492 ret = HDCP_TOPOLOGY_CHANGE; 6493 6494 return ret; 6495 } 6496 6497 static 6498 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, 6499 bool *capable) 6500 { 6501 u8 rx_caps[3]; 6502 int ret; 6503 6504 *capable = false; 6505 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6506 DP_HDCP_2_2_REG_RX_CAPS_OFFSET, 6507 rx_caps, HDCP_2_2_RXCAPS_LEN); 6508 if (ret != HDCP_2_2_RXCAPS_LEN) 6509 return ret >= 0 ? -EIO : ret; 6510 6511 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6512 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6513 *capable = true; 6514 6515 return 0; 6516 } 6517 6518 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 6519 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 6520 .read_bksv = intel_dp_hdcp_read_bksv, 6521 .read_bstatus = intel_dp_hdcp_read_bstatus, 6522 .repeater_present = intel_dp_hdcp_repeater_present, 6523 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 6524 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 6525 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 6526 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 6527 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 6528 .check_link = intel_dp_hdcp_check_link, 6529 .hdcp_capable = intel_dp_hdcp_capable, 6530 .write_2_2_msg = intel_dp_hdcp2_write_msg, 6531 .read_2_2_msg = intel_dp_hdcp2_read_msg, 6532 .config_stream_type = intel_dp_hdcp2_config_stream_type, 6533 .check_2_2_link = intel_dp_hdcp2_check_link, 6534 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 6535 .protocol = HDCP_PROTOCOL_DP, 6536 }; 6537 6538 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 6539 { 6540 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6541 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6542 6543 lockdep_assert_held(&dev_priv->pps_mutex); 6544 6545 if (!edp_have_panel_vdd(intel_dp)) 6546 return; 6547 6548 /* 6549 * The VDD bit needs a power domain reference, so if the bit is 6550 * already enabled when we boot or resume, grab this reference and 6551 * schedule a vdd off, so we don't hold on to the reference 6552 * indefinitely. 
6553 */ 6554 drm_dbg_kms(&dev_priv->drm, 6555 "VDD left on by BIOS, adjusting state tracking\n"); 6556 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 6557 6558 edp_panel_vdd_schedule_off(intel_dp); 6559 } 6560 6561 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 6562 { 6563 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6564 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6565 enum pipe pipe; 6566 6567 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 6568 encoder->port, &pipe)) 6569 return pipe; 6570 6571 return INVALID_PIPE; 6572 } 6573 6574 void intel_dp_encoder_reset(struct drm_encoder *encoder) 6575 { 6576 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 6577 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 6578 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 6579 intel_wakeref_t wakeref; 6580 6581 if (!HAS_DDI(dev_priv)) 6582 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 6583 6584 if (lspcon->active) 6585 lspcon_resume(lspcon); 6586 6587 intel_dp->reset_link_params = true; 6588 6589 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 6590 !intel_dp_is_edp(intel_dp)) 6591 return; 6592 6593 with_pps_lock(intel_dp, wakeref) { 6594 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6595 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 6596 6597 if (intel_dp_is_edp(intel_dp)) { 6598 /* 6599 * Reinit the power sequencer, in case BIOS did 6600 * something nasty with it. 6601 */ 6602 intel_dp_pps_init(intel_dp); 6603 intel_edp_panel_vdd_sanitize(intel_dp); 6604 } 6605 } 6606 } 6607 6608 static int intel_modeset_tile_group(struct intel_atomic_state *state, 6609 int tile_group_id) 6610 { 6611 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6612 struct drm_connector_list_iter conn_iter; 6613 struct drm_connector *connector; 6614 int ret = 0; 6615 6616 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 6617 drm_for_each_connector_iter(connector, &conn_iter) { 6618 struct drm_connector_state *conn_state; 6619 struct intel_crtc_state *crtc_state; 6620 struct intel_crtc *crtc; 6621 6622 if (!connector->has_tile || 6623 connector->tile_group->id != tile_group_id) 6624 continue; 6625 6626 conn_state = drm_atomic_get_connector_state(&state->base, 6627 connector); 6628 if (IS_ERR(conn_state)) { 6629 ret = PTR_ERR(conn_state); 6630 break; 6631 } 6632 6633 crtc = to_intel_crtc(conn_state->crtc); 6634 6635 if (!crtc) 6636 continue; 6637 6638 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 6639 crtc_state->uapi.mode_changed = true; 6640 6641 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6642 if (ret) 6643 break; 6644 } 6645 drm_connector_list_iter_end(&conn_iter); 6646 6647 return ret; 6648 } 6649 6650 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 6651 { 6652 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6653 struct intel_crtc *crtc; 6654 6655 if (transcoders == 0) 6656 return 0; 6657 6658 for_each_intel_crtc(&dev_priv->drm, crtc) { 6659 struct intel_crtc_state *crtc_state; 6660 int ret; 6661 6662 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 6663 if (IS_ERR(crtc_state)) 6664 return PTR_ERR(crtc_state); 6665 6666 if (!crtc_state->hw.enable) 6667 continue; 6668 6669 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 6670 continue; 6671 6672 crtc_state->uapi.mode_changed = true; 6673 6674 ret = 
drm_atomic_add_affected_connectors(&state->base, &crtc->base); 6675 if (ret) 6676 return ret; 6677 6678 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 6679 if (ret) 6680 return ret; 6681 6682 transcoders &= ~BIT(crtc_state->cpu_transcoder); 6683 } 6684 6685 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 6686 6687 return 0; 6688 } 6689 6690 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 6691 struct drm_connector *connector) 6692 { 6693 const struct drm_connector_state *old_conn_state = 6694 drm_atomic_get_old_connector_state(&state->base, connector); 6695 const struct intel_crtc_state *old_crtc_state; 6696 struct intel_crtc *crtc; 6697 u8 transcoders; 6698 6699 crtc = to_intel_crtc(old_conn_state->crtc); 6700 if (!crtc) 6701 return 0; 6702 6703 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 6704 6705 if (!old_crtc_state->hw.active) 6706 return 0; 6707 6708 transcoders = old_crtc_state->sync_mode_slaves_mask; 6709 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 6710 transcoders |= BIT(old_crtc_state->master_transcoder); 6711 6712 return intel_modeset_affected_transcoders(state, 6713 transcoders); 6714 } 6715 6716 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 6717 struct drm_atomic_state *_state) 6718 { 6719 struct drm_i915_private *dev_priv = to_i915(conn->dev); 6720 struct intel_atomic_state *state = to_intel_atomic_state(_state); 6721 int ret; 6722 6723 ret = intel_digital_connector_atomic_check(conn, &state->base); 6724 if (ret) 6725 return ret; 6726 6727 if (INTEL_GEN(dev_priv) < 11) 6728 return 0; 6729 6730 if (!intel_connector_needs_modeset(state, conn)) 6731 return 0; 6732 6733 if (conn->has_tile) { 6734 ret = intel_modeset_tile_group(state, conn->tile_group->id); 6735 if (ret) 6736 return ret; 6737 } 6738 6739 return intel_modeset_synced_crtcs(state, conn); 6740 } 6741 6742 static const struct drm_connector_funcs intel_dp_connector_funcs = { 6743 .force = intel_dp_force, 6744 .fill_modes = drm_helper_probe_single_connector_modes, 6745 .atomic_get_property = intel_digital_connector_atomic_get_property, 6746 .atomic_set_property = intel_digital_connector_atomic_set_property, 6747 .late_register = intel_dp_connector_register, 6748 .early_unregister = intel_dp_connector_unregister, 6749 .destroy = intel_connector_destroy, 6750 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6751 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 6752 }; 6753 6754 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 6755 .detect_ctx = intel_dp_detect, 6756 .get_modes = intel_dp_get_modes, 6757 .mode_valid = intel_dp_mode_valid, 6758 .atomic_check = intel_dp_connector_atomic_check, 6759 }; 6760 6761 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 6762 .reset = intel_dp_encoder_reset, 6763 .destroy = intel_dp_encoder_destroy, 6764 }; 6765 6766 enum irqreturn 6767 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 6768 { 6769 struct intel_dp *intel_dp = &intel_dig_port->dp; 6770 6771 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { 6772 /* 6773 * vdd off can generate a long pulse on eDP which 6774 * would require vdd on to handle it, and thus we 6775 * would end up in an endless cycle of 6776 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." 
6777 */ 6778 DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n", 6779 intel_dig_port->base.base.base.id, 6780 intel_dig_port->base.base.name); 6781 return IRQ_HANDLED; 6782 } 6783 6784 DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n", 6785 intel_dig_port->base.base.base.id, 6786 intel_dig_port->base.base.name, 6787 long_hpd ? "long" : "short"); 6788 6789 if (long_hpd) { 6790 intel_dp->reset_link_params = true; 6791 return IRQ_NONE; 6792 } 6793 6794 if (intel_dp->is_mst) { 6795 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { 6796 /* 6797 * If we were in MST mode, and device is not 6798 * there, get out of MST mode 6799 */ 6800 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", 6801 intel_dp->is_mst, intel_dp->mst_mgr.mst_state); 6802 intel_dp->is_mst = false; 6803 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6804 intel_dp->is_mst); 6805 6806 return IRQ_NONE; 6807 } 6808 } 6809 6810 if (!intel_dp->is_mst) { 6811 bool handled; 6812 6813 handled = intel_dp_short_pulse(intel_dp); 6814 6815 if (!handled) 6816 return IRQ_NONE; 6817 } 6818 6819 return IRQ_HANDLED; 6820 } 6821 6822 /* check the VBT to see whether the eDP is on another port */ 6823 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 6824 { 6825 /* 6826 * eDP not supported on g4x. so bail out early just 6827 * for a bit extra safety in case the VBT is bonkers. 6828 */ 6829 if (INTEL_GEN(dev_priv) < 5) 6830 return false; 6831 6832 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 6833 return true; 6834 6835 return intel_bios_is_port_edp(dev_priv, port); 6836 } 6837 6838 static void 6839 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 6840 { 6841 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6842 enum port port = dp_to_dig_port(intel_dp)->base.port; 6843 6844 if (!IS_G4X(dev_priv) && port != PORT_A) 6845 intel_attach_force_audio_property(connector); 6846 6847 intel_attach_broadcast_rgb_property(connector); 6848 if (HAS_GMCH(dev_priv)) 6849 drm_connector_attach_max_bpc_property(connector, 6, 10); 6850 else if (INTEL_GEN(dev_priv) >= 5) 6851 drm_connector_attach_max_bpc_property(connector, 6, 12); 6852 6853 intel_attach_colorspace_property(connector); 6854 6855 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 6856 drm_object_attach_property(&connector->base, 6857 connector->dev->mode_config.hdr_output_metadata_property, 6858 0); 6859 6860 if (intel_dp_is_edp(intel_dp)) { 6861 u32 allowed_scalers; 6862 6863 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 6864 if (!HAS_GMCH(dev_priv)) 6865 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 6866 6867 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 6868 6869 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 6870 6871 } 6872 } 6873 6874 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 6875 { 6876 intel_dp->panel_power_off_time = ktime_get_boottime(); 6877 intel_dp->last_power_on = jiffies; 6878 intel_dp->last_backlight_off = jiffies; 6879 } 6880 6881 static void 6882 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 6883 { 6884 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6885 u32 pp_on, pp_off, pp_ctl; 6886 struct pps_registers regs; 6887 6888 intel_pps_get_registers(intel_dp, ®s); 6889 6890 pp_ctl = ilk_get_pp_control(intel_dp); 6891 6892 /* Ensure PPS is unlocked */ 6893 if (!HAS_DDI(dev_priv)) 6894 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 
6895 6896 pp_on = intel_de_read(dev_priv, regs.pp_on); 6897 pp_off = intel_de_read(dev_priv, regs.pp_off); 6898 6899 /* Pull timing values out of registers */ 6900 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 6901 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 6902 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 6903 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 6904 6905 if (i915_mmio_reg_valid(regs.pp_div)) { 6906 u32 pp_div; 6907 6908 pp_div = intel_de_read(dev_priv, regs.pp_div); 6909 6910 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 6911 } else { 6912 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 6913 } 6914 } 6915 6916 static void 6917 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 6918 { 6919 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 6920 state_name, 6921 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 6922 } 6923 6924 static void 6925 intel_pps_verify_state(struct intel_dp *intel_dp) 6926 { 6927 struct edp_power_seq hw; 6928 struct edp_power_seq *sw = &intel_dp->pps_delays; 6929 6930 intel_pps_readout_hw_state(intel_dp, &hw); 6931 6932 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 6933 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 6934 DRM_ERROR("PPS state mismatch\n"); 6935 intel_pps_dump_state("sw", sw); 6936 intel_pps_dump_state("hw", &hw); 6937 } 6938 } 6939 6940 static void 6941 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 6942 { 6943 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6944 struct edp_power_seq cur, vbt, spec, 6945 *final = &intel_dp->pps_delays; 6946 6947 lockdep_assert_held(&dev_priv->pps_mutex); 6948 6949 /* already initialized? */ 6950 if (final->t11_t12 != 0) 6951 return; 6952 6953 intel_pps_readout_hw_state(intel_dp, &cur); 6954 6955 intel_pps_dump_state("cur", &cur); 6956 6957 vbt = dev_priv->vbt.edp.pps; 6958 /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay 6959 * of 500ms appears to be too short. Occasionally the panel 6960 * just fails to power back on. Increasing the delay to 800ms 6961 * seems sufficient to avoid this problem. 6962 */ 6963 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 6964 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 6965 drm_dbg_kms(&dev_priv->drm, 6966 "Increasing T12 panel delay as per the quirk to %d\n", 6967 vbt.t11_t12); 6968 } 6969 /* T11_T12 delay is special and actually in units of 100ms, but zero 6970 * based in the hw (so we need to add 100 ms). But the sw vbt 6971 * table multiplies it by 1000 to make it in units of 100usec, 6972 * too. */ 6973 vbt.t11_t12 += 100 * 10; 6974 6975 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 6976 * our hw here, which are all in 100usec. */ 6977 spec.t1_t3 = 210 * 10; 6978 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 6979 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 6980 spec.t10 = 500 * 10; 6981 /* This one is special and actually in units of 100ms, but zero 6982 * based in the hw (so we need to add 100 ms). But the sw vbt 6983 * table multiplies it by 1000 to make it in units of 100usec, 6984 * too. */ 6985 spec.t11_t12 = (510 + 100) * 10; 6986 6987 intel_pps_dump_state("vbt", &vbt); 6988 6989 /* Use the max of the register settings and vbt. If both are 6990 * unset, fall back to the spec limits. */ 6991 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ?
\ 6992 spec.field : \ 6993 max(cur.field, vbt.field)) 6994 assign_final(t1_t3); 6995 assign_final(t8); 6996 assign_final(t9); 6997 assign_final(t10); 6998 assign_final(t11_t12); 6999 #undef assign_final 7000 7001 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 7002 intel_dp->panel_power_up_delay = get_delay(t1_t3); 7003 intel_dp->backlight_on_delay = get_delay(t8); 7004 intel_dp->backlight_off_delay = get_delay(t9); 7005 intel_dp->panel_power_down_delay = get_delay(t10); 7006 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 7007 #undef get_delay 7008 7009 drm_dbg_kms(&dev_priv->drm, 7010 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 7011 intel_dp->panel_power_up_delay, 7012 intel_dp->panel_power_down_delay, 7013 intel_dp->panel_power_cycle_delay); 7014 7015 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", 7016 intel_dp->backlight_on_delay, 7017 intel_dp->backlight_off_delay); 7018 7019 /* 7020 * We override the HW backlight delays to 1 because we do manual waits 7021 * on them. For T8, even BSpec recommends doing it. For T9, if we 7022 * don't do this, we'll end up waiting for the backlight off delay 7023 * twice: once when we do the manual sleep, and once when we disable 7024 * the panel and wait for the PP_STATUS bit to become zero. 7025 */ 7026 final->t8 = 1; 7027 final->t9 = 1; 7028 7029 /* 7030 * HW has only a 100msec granularity for t11_t12 so round it up 7031 * accordingly. 7032 */ 7033 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 7034 } 7035 7036 static void 7037 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 7038 bool force_disable_vdd) 7039 { 7040 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7041 u32 pp_on, pp_off, port_sel = 0; 7042 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; 7043 struct pps_registers regs; 7044 enum port port = dp_to_dig_port(intel_dp)->base.port; 7045 const struct edp_power_seq *seq = &intel_dp->pps_delays; 7046 7047 lockdep_assert_held(&dev_priv->pps_mutex); 7048 7049 intel_pps_get_registers(intel_dp, ®s); 7050 7051 /* 7052 * On some VLV machines the BIOS can leave the VDD 7053 * enabled even on power sequencers which aren't 7054 * hooked up to any port. This would mess up the 7055 * power domain tracking the first time we pick 7056 * one of these power sequencers for use since 7057 * edp_panel_vdd_on() would notice that the VDD was 7058 * already on and therefore wouldn't grab the power 7059 * domain reference. Disable VDD first to avoid this. 7060 * This also avoids spuriously turning the VDD on as 7061 * soon as the new power sequencer gets initialized. 7062 */ 7063 if (force_disable_vdd) { 7064 u32 pp = ilk_get_pp_control(intel_dp); 7065 7066 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, 7067 "Panel power already on\n"); 7068 7069 if (pp & EDP_FORCE_VDD) 7070 drm_dbg_kms(&dev_priv->drm, 7071 "VDD already on, disabling first\n"); 7072 7073 pp &= ~EDP_FORCE_VDD; 7074 7075 intel_de_write(dev_priv, regs.pp_ctrl, pp); 7076 } 7077 7078 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 7079 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 7080 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 7081 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 7082 7083 /* Haswell doesn't have any port selection bits for the panel 7084 * power sequencer any more. 
*/ 7085 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7086 port_sel = PANEL_PORT_SELECT_VLV(port); 7087 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7088 switch (port) { 7089 case PORT_A: 7090 port_sel = PANEL_PORT_SELECT_DPA; 7091 break; 7092 case PORT_C: 7093 port_sel = PANEL_PORT_SELECT_DPC; 7094 break; 7095 case PORT_D: 7096 port_sel = PANEL_PORT_SELECT_DPD; 7097 break; 7098 default: 7099 MISSING_CASE(port); 7100 break; 7101 } 7102 } 7103 7104 pp_on |= port_sel; 7105 7106 intel_de_write(dev_priv, regs.pp_on, pp_on); 7107 intel_de_write(dev_priv, regs.pp_off, pp_off); 7108 7109 /* 7110 * Compute the divisor for the pp clock, simply match the Bspec formula. 7111 */ 7112 if (i915_mmio_reg_valid(regs.pp_div)) { 7113 intel_de_write(dev_priv, regs.pp_div, 7114 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 7115 } else { 7116 u32 pp_ctl; 7117 7118 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 7119 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 7120 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 7121 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7122 } 7123 7124 drm_dbg_kms(&dev_priv->drm, 7125 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 7126 intel_de_read(dev_priv, regs.pp_on), 7127 intel_de_read(dev_priv, regs.pp_off), 7128 i915_mmio_reg_valid(regs.pp_div) ? 7129 intel_de_read(dev_priv, regs.pp_div) : 7130 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 7131 } 7132 7133 static void intel_dp_pps_init(struct intel_dp *intel_dp) 7134 { 7135 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7136 7137 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7138 vlv_initial_power_sequencer_setup(intel_dp); 7139 } else { 7140 intel_dp_init_panel_power_sequencer(intel_dp); 7141 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 7142 } 7143 } 7144 7145 /** 7146 * intel_dp_set_drrs_state - program registers for RR switch to take effect 7147 * @dev_priv: i915 device 7148 * @crtc_state: a pointer to the active intel_crtc_state 7149 * @refresh_rate: RR to be programmed 7150 * 7151 * This function gets called when refresh rate (RR) has to be changed from 7152 * one frequency to another. Switches can be between high and low RR 7153 * supported by the panel or to any other RR based on media playback (in 7154 * this case, RR value needs to be passed from user space). 7155 * 7156 * The caller of this function needs to take a lock on dev_priv->drrs. 
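 * (i.e. hold dev_priv->drrs.mutex; the disable, invalidate, flush and downclock-work paths below all take it before calling in here.)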
7157 */ 7158 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 7159 const struct intel_crtc_state *crtc_state, 7160 int refresh_rate) 7161 { 7162 struct intel_dp *intel_dp = dev_priv->drrs.dp; 7163 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 7164 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7165 7166 if (refresh_rate <= 0) { 7167 drm_dbg_kms(&dev_priv->drm, 7168 "Refresh rate should be positive non-zero.\n"); 7169 return; 7170 } 7171 7172 if (intel_dp == NULL) { 7173 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 7174 return; 7175 } 7176 7177 if (!intel_crtc) { 7178 drm_dbg_kms(&dev_priv->drm, 7179 "DRRS: intel_crtc not initialized\n"); 7180 return; 7181 } 7182 7183 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7184 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 7185 return; 7186 } 7187 7188 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == 7189 refresh_rate) 7190 index = DRRS_LOW_RR; 7191 7192 if (index == dev_priv->drrs.refresh_rate_type) { 7193 drm_dbg_kms(&dev_priv->drm, 7194 "DRRS requested for previously set RR...ignoring\n"); 7195 return; 7196 } 7197 7198 if (!crtc_state->hw.active) { 7199 drm_dbg_kms(&dev_priv->drm, 7200 "eDP encoder disabled. CRTC not Active\n"); 7201 return; 7202 } 7203 7204 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7205 switch (index) { 7206 case DRRS_HIGH_RR: 7207 intel_dp_set_m_n(crtc_state, M1_N1); 7208 break; 7209 case DRRS_LOW_RR: 7210 intel_dp_set_m_n(crtc_state, M2_N2); 7211 break; 7212 case DRRS_MAX_RR: 7213 default: 7214 drm_err(&dev_priv->drm, 7215 "Unsupported refreshrate type\n"); 7216 } 7217 } else if (INTEL_GEN(dev_priv) > 6) { 7218 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7219 u32 val; 7220 7221 val = intel_de_read(dev_priv, reg); 7222 if (index > DRRS_HIGH_RR) { 7223 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7224 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7225 else 7226 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7227 } else { 7228 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7229 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7230 else 7231 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7232 } 7233 intel_de_write(dev_priv, reg, val); 7234 } 7235 7236 dev_priv->drrs.refresh_rate_type = index; 7237 7238 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", 7239 refresh_rate); 7240 } 7241 7242 /** 7243 * intel_edp_drrs_enable - init drrs struct if supported 7244 * @intel_dp: DP struct 7245 * @crtc_state: A pointer to the active crtc state. 7246 * 7247 * Initializes frontbuffer_bits and drrs.dp 7248 */ 7249 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 7250 const struct intel_crtc_state *crtc_state) 7251 { 7252 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7253 7254 if (!crtc_state->has_drrs) { 7255 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); 7256 return; 7257 } 7258 7259 if (dev_priv->psr.enabled) { 7260 drm_dbg_kms(&dev_priv->drm, 7261 "PSR enabled. Not enabling DRRS.\n"); 7262 return; 7263 } 7264 7265 mutex_lock(&dev_priv->drrs.mutex); 7266 if (dev_priv->drrs.dp) { 7267 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); 7268 goto unlock; 7269 } 7270 7271 dev_priv->drrs.busy_frontbuffer_bits = 0; 7272 7273 dev_priv->drrs.dp = intel_dp; 7274 7275 unlock: 7276 mutex_unlock(&dev_priv->drrs.mutex); 7277 } 7278 7279 /** 7280 * intel_edp_drrs_disable - Disable DRRS 7281 * @intel_dp: DP struct 7282 * @old_crtc_state: Pointer to old crtc_state. 
7283 * 7284 */ 7285 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 7286 const struct intel_crtc_state *old_crtc_state) 7287 { 7288 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7289 7290 if (!old_crtc_state->has_drrs) 7291 return; 7292 7293 mutex_lock(&dev_priv->drrs.mutex); 7294 if (!dev_priv->drrs.dp) { 7295 mutex_unlock(&dev_priv->drrs.mutex); 7296 return; 7297 } 7298 7299 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7300 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 7301 intel_dp->attached_connector->panel.fixed_mode->vrefresh); 7302 7303 dev_priv->drrs.dp = NULL; 7304 mutex_unlock(&dev_priv->drrs.mutex); 7305 7306 cancel_delayed_work_sync(&dev_priv->drrs.work); 7307 } 7308 7309 static void intel_edp_drrs_downclock_work(struct work_struct *work) 7310 { 7311 struct drm_i915_private *dev_priv = 7312 container_of(work, typeof(*dev_priv), drrs.work.work); 7313 struct intel_dp *intel_dp; 7314 7315 mutex_lock(&dev_priv->drrs.mutex); 7316 7317 intel_dp = dev_priv->drrs.dp; 7318 7319 if (!intel_dp) 7320 goto unlock; 7321 7322 /* 7323 * The delayed work can race with an invalidate hence we need to 7324 * recheck. 7325 */ 7326 7327 if (dev_priv->drrs.busy_frontbuffer_bits) 7328 goto unlock; 7329 7330 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 7331 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7332 7333 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7334 intel_dp->attached_connector->panel.downclock_mode->vrefresh); 7335 } 7336 7337 unlock: 7338 mutex_unlock(&dev_priv->drrs.mutex); 7339 } 7340 7341 /** 7342 * intel_edp_drrs_invalidate - Disable Idleness DRRS 7343 * @dev_priv: i915 device 7344 * @frontbuffer_bits: frontbuffer plane tracking bits 7345 * 7346 * This function gets called every time rendering on the given planes starts. 7347 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR). 7348 * 7349 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7350 */ 7351 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 7352 unsigned int frontbuffer_bits) 7353 { 7354 struct drm_crtc *crtc; 7355 enum pipe pipe; 7356 7357 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7358 return; 7359 7360 cancel_delayed_work(&dev_priv->drrs.work); 7361 7362 mutex_lock(&dev_priv->drrs.mutex); 7363 if (!dev_priv->drrs.dp) { 7364 mutex_unlock(&dev_priv->drrs.mutex); 7365 return; 7366 } 7367 7368 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7369 pipe = to_intel_crtc(crtc)->pipe; 7370 7371 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7372 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 7373 7374 /* invalidate means busy screen hence upclock */ 7375 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7376 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7377 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7378 7379 mutex_unlock(&dev_priv->drrs.mutex); 7380 } 7381 7382 /** 7383 * intel_edp_drrs_flush - Restart Idleness DRRS 7384 * @dev_priv: i915 device 7385 * @frontbuffer_bits: frontbuffer plane tracking bits 7386 * 7387 * This function gets called every time rendering on the given planes has 7388 * completed or flip on a crtc is completed. So DRRS should be upclocked 7389 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, 7390 * if no other planes are dirty. 7391 * 7392 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7393 */ 7394 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 7395 unsigned int frontbuffer_bits) 7396 { 7397 struct drm_crtc *crtc; 7398 enum pipe pipe; 7399 7400 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7401 return; 7402 7403 cancel_delayed_work(&dev_priv->drrs.work); 7404 7405 mutex_lock(&dev_priv->drrs.mutex); 7406 if (!dev_priv->drrs.dp) { 7407 mutex_unlock(&dev_priv->drrs.mutex); 7408 return; 7409 } 7410 7411 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7412 pipe = to_intel_crtc(crtc)->pipe; 7413 7414 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7415 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 7416 7417 /* flush means busy screen hence upclock */ 7418 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7419 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7420 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7421 7422 /* 7423 * flush also means no more activity hence schedule downclock, if all 7424 * other fbs are quiescent too 7425 */ 7426 if (!dev_priv->drrs.busy_frontbuffer_bits) 7427 schedule_delayed_work(&dev_priv->drrs.work, 7428 msecs_to_jiffies(1000)); 7429 mutex_unlock(&dev_priv->drrs.mutex); 7430 } 7431 7432 /** 7433 * DOC: Display Refresh Rate Switching (DRRS) 7434 * 7435 * Display Refresh Rate Switching (DRRS) is a power conservation feature 7436 * which enables switching between low and high refresh rates, 7437 * dynamically, based on the usage scenario. This feature is applicable 7438 * to internal panels. 7439 * 7440 * Indication that the panel supports DRRS is given by the panel EDID, which 7441 * would list multiple refresh rates for one resolution. 7442 * 7443 * DRRS is of 2 types - static and seamless. 7444 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 7445 * (may appear as a blink on screen) and is used in dock-undock scenario. 7446 * Seamless DRRS involves changing RR without any visual effect to the user 7447 * and can be used during normal system usage. This is done by programming 7448 * certain registers. 7449 * 7450 * Support for static/seamless DRRS may be indicated in the VBT based on 7451 * inputs from the panel spec. 7452 * 7453 * DRRS saves power by switching to low RR based on usage scenarios. 7454 * 7455 * The implementation is based on frontbuffer tracking. When 7456 * there is a disturbance on the screen triggered by user activity or a periodic 7457 * system activity, DRRS is disabled (RR is changed to high RR). When there is 7458 * no movement on screen, after a timeout of 1 second, a switch to low RR is 7459 * made. 7460 * 7461 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 7462 * and intel_edp_drrs_flush() are called. 7463 * 7464 * DRRS can be further extended to support other internal panels and also 7465 * the scenario of video playback wherein RR is set based on the rate 7466 * requested by userspace. 7467 */ 7468 7469 /** 7470 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7471 * @connector: eDP connector 7472 * @fixed_mode: preferred mode of panel 7473 * 7474 * This function is called only once at driver load to initialize basic 7475 * DRRS stuff. 7476 * 7477 * Returns: 7478 * Downclock mode if panel supports it, else return NULL. 7479 * DRRS support is determined by the presence of downclock mode (apart 7480 * from VBT setting).
7481 */
7482 static struct drm_display_mode *
7483 intel_dp_drrs_init(struct intel_connector *connector,
7484 struct drm_display_mode *fixed_mode)
7485 {
7486 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7487 struct drm_display_mode *downclock_mode = NULL;
7488
7489 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7490 mutex_init(&dev_priv->drrs.mutex);
7491
7492 if (INTEL_GEN(dev_priv) <= 6) {
7493 drm_dbg_kms(&dev_priv->drm,
7494 "DRRS supported for Gen7 and above\n");
7495 return NULL;
7496 }
7497
7498 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7499 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
7500 return NULL;
7501 }
7502
7503 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7504 if (!downclock_mode) {
7505 drm_dbg_kms(&dev_priv->drm,
7506 "Downclock mode is not found. DRRS not supported\n");
7507 return NULL;
7508 }
7509
7510 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7511
7512 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7513 drm_dbg_kms(&dev_priv->drm,
7514 "seamless DRRS supported for eDP panel.\n");
7515 return downclock_mode;
7516 }
7517
7518 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
7519 struct intel_connector *intel_connector)
7520 {
7521 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7522 struct drm_device *dev = &dev_priv->drm;
7523 struct drm_connector *connector = &intel_connector->base;
7524 struct drm_display_mode *fixed_mode = NULL;
7525 struct drm_display_mode *downclock_mode = NULL;
7526 bool has_dpcd;
7527 enum pipe pipe = INVALID_PIPE;
7528 intel_wakeref_t wakeref;
7529 struct edid *edid;
7530
7531 if (!intel_dp_is_edp(intel_dp))
7532 return true;
7533
7534 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7535
7536 /*
7537 * On IBX/CPT we may get here with LVDS already registered. Since the
7538 * driver uses the only internal power sequencer available for both
7539 * eDP and LVDS, bail out early in this case to prevent interfering
7540 * with an already powered-on LVDS power sequencer.
7541 */
7542 if (intel_get_lvds_encoder(dev_priv)) {
7543 drm_WARN_ON(dev,
7544 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
7545 drm_info(&dev_priv->drm,
7546 "LVDS was detected, not registering eDP\n");
7547
7548 return false;
7549 }
7550
7551 with_pps_lock(intel_dp, wakeref) {
7552 intel_dp_init_panel_power_timestamps(intel_dp);
7553 intel_dp_pps_init(intel_dp);
7554 intel_edp_panel_vdd_sanitize(intel_dp);
7555 }
7556
7557 /* Cache DPCD and EDID for eDP. */
7558 has_dpcd = intel_edp_init_dpcd(intel_dp);
7559
7560 if (!has_dpcd) {
7561 /* if this fails, presume the device is a ghost */
7562 drm_info(&dev_priv->drm,
7563 "failed to retrieve link info, disabling eDP\n");
7564 goto out_vdd_off;
7565 }
7566
7567 mutex_lock(&dev->mode_config.mutex);
7568 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
7569 if (edid) {
7570 if (drm_add_edid_modes(connector, edid)) {
7571 drm_connector_update_edid_property(connector, edid);
7572 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
7573 } else {
7574 kfree(edid);
7575 edid = ERR_PTR(-EINVAL);
7576 }
7577 } else {
7578 edid = ERR_PTR(-ENOENT);
7579 }
7580 intel_connector->edid = edid;
7581
7582 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7583 if (fixed_mode)
7584 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
7585
7586 /* fallback to VBT if available for eDP */
7587 if (!fixed_mode)
7588 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
7589 mutex_unlock(&dev->mode_config.mutex);
7590
7591 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7592 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7593 register_reboot_notifier(&intel_dp->edp_notifier);
7594
7595 /*
7596 * Figure out the current pipe for the initial backlight setup.
7597 * If the current pipe isn't valid, try the PPS pipe, and if that
7598 * fails just assume pipe A.
7599 */
7600 pipe = vlv_active_pipe(intel_dp);
7601
7602 if (pipe != PIPE_A && pipe != PIPE_B)
7603 pipe = intel_dp->pps_pipe;
7604
7605 if (pipe != PIPE_A && pipe != PIPE_B)
7606 pipe = PIPE_A;
7607
7608 drm_dbg_kms(&dev_priv->drm,
7609 "using pipe %c for initial backlight setup\n",
7610 pipe_name(pipe));
7611 }
7612
7613 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
7614 intel_connector->panel.backlight.power = intel_edp_backlight_power;
7615 intel_panel_setup_backlight(connector, pipe);
7616
7617 if (fixed_mode) {
7618 drm_connector_set_panel_orientation_with_quirk(connector,
7619 dev_priv->vbt.orientation,
7620 fixed_mode->hdisplay, fixed_mode->vdisplay);
7621 }
7622
7623 return true;
7624
7625 out_vdd_off:
7626 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7627 /*
7628 * vdd might still be enabled due to the delayed vdd off.
7629 * Make sure vdd is actually turned off here.
7630 */
7631 with_pps_lock(intel_dp, wakeref)
7632 edp_panel_vdd_off_sync(intel_dp);
7633
7634 return false;
7635 }
7636
7637 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7638 {
7639 struct intel_connector *intel_connector;
7640 struct drm_connector *connector;
7641
7642 intel_connector = container_of(work, typeof(*intel_connector),
7643 modeset_retry_work);
7644 connector = &intel_connector->base;
7645 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7646 connector->name);
7647
7648 /* Grab the locks before changing connector property */
7649 mutex_lock(&connector->dev->mode_config.mutex);
7650 /* Set connector link status to BAD and send a Uevent to notify
7651 * userspace to do a modeset.
7652 */ 7653 drm_connector_set_link_status_property(connector, 7654 DRM_MODE_LINK_STATUS_BAD); 7655 mutex_unlock(&connector->dev->mode_config.mutex); 7656 /* Send Hotplug uevent so userspace can reprobe */ 7657 drm_kms_helper_hotplug_event(connector->dev); 7658 } 7659 7660 bool 7661 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 7662 struct intel_connector *intel_connector) 7663 { 7664 struct drm_connector *connector = &intel_connector->base; 7665 struct intel_dp *intel_dp = &intel_dig_port->dp; 7666 struct intel_encoder *intel_encoder = &intel_dig_port->base; 7667 struct drm_device *dev = intel_encoder->base.dev; 7668 struct drm_i915_private *dev_priv = to_i915(dev); 7669 enum port port = intel_encoder->port; 7670 enum phy phy = intel_port_to_phy(dev_priv, port); 7671 int type; 7672 7673 /* Initialize the work for modeset in case of link train failure */ 7674 INIT_WORK(&intel_connector->modeset_retry_work, 7675 intel_dp_modeset_retry_work_fn); 7676 7677 if (drm_WARN(dev, intel_dig_port->max_lanes < 1, 7678 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 7679 intel_dig_port->max_lanes, intel_encoder->base.base.id, 7680 intel_encoder->base.name)) 7681 return false; 7682 7683 intel_dp_set_source_rates(intel_dp); 7684 7685 intel_dp->reset_link_params = true; 7686 intel_dp->pps_pipe = INVALID_PIPE; 7687 intel_dp->active_pipe = INVALID_PIPE; 7688 7689 /* Preserve the current hw state. */ 7690 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 7691 intel_dp->attached_connector = intel_connector; 7692 7693 if (intel_dp_is_port_edp(dev_priv, port)) { 7694 /* 7695 * Currently we don't support eDP on TypeC ports, although in 7696 * theory it could work on TypeC legacy ports. 7697 */ 7698 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 7699 type = DRM_MODE_CONNECTOR_eDP; 7700 } else { 7701 type = DRM_MODE_CONNECTOR_DisplayPort; 7702 } 7703 7704 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7705 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 7706 7707 /* 7708 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 7709 * for DP the encoder type can be set by the caller to 7710 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 7711 */ 7712 if (type == DRM_MODE_CONNECTOR_eDP) 7713 intel_encoder->type = INTEL_OUTPUT_EDP; 7714 7715 /* eDP only on port B and/or C on vlv/chv */ 7716 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 7717 IS_CHERRYVIEW(dev_priv)) && 7718 intel_dp_is_edp(intel_dp) && 7719 port != PORT_B && port != PORT_C)) 7720 return false; 7721 7722 drm_dbg_kms(&dev_priv->drm, 7723 "Adding %s connector on [ENCODER:%d:%s]\n", 7724 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 7725 intel_encoder->base.base.id, intel_encoder->base.name); 7726 7727 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 7728 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 7729 7730 if (!HAS_GMCH(dev_priv)) 7731 connector->interlace_allowed = true; 7732 connector->doublescan_allowed = 0; 7733 7734 if (INTEL_GEN(dev_priv) >= 11) 7735 connector->ycbcr_420_allowed = true; 7736 7737 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 7738 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 7739 7740 intel_dp_aux_init(intel_dp); 7741 7742 intel_connector_attach_encoder(intel_connector, intel_encoder); 7743 7744 if (HAS_DDI(dev_priv)) 7745 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 7746 else 7747 intel_connector->get_hw_state = intel_connector_get_hw_state; 7748 7749 /* init MST on ports that can support it */ 7750 intel_dp_mst_encoder_init(intel_dig_port, 7751 intel_connector->base.base.id); 7752 7753 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 7754 intel_dp_aux_fini(intel_dp); 7755 intel_dp_mst_encoder_cleanup(intel_dig_port); 7756 goto fail; 7757 } 7758 7759 intel_dp_add_properties(intel_dp, connector); 7760 7761 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 7762 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); 7763 if (ret) 7764 drm_dbg_kms(&dev_priv->drm, 7765 "HDCP init failed, skipping.\n"); 7766 } 7767 7768 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 7769 * 0xd. Failure to do so will result in spurious interrupts being 7770 * generated on the port when a cable is not attached. 7771 */ 7772 if (IS_G45(dev_priv)) { 7773 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 7774 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 7775 (temp & ~0xf) | 0xd); 7776 } 7777 7778 return true; 7779 7780 fail: 7781 drm_connector_cleanup(connector); 7782 7783 return false; 7784 } 7785 7786 bool intel_dp_init(struct drm_i915_private *dev_priv, 7787 i915_reg_t output_reg, 7788 enum port port) 7789 { 7790 struct intel_digital_port *intel_dig_port; 7791 struct intel_encoder *intel_encoder; 7792 struct drm_encoder *encoder; 7793 struct intel_connector *intel_connector; 7794 7795 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 7796 if (!intel_dig_port) 7797 return false; 7798 7799 intel_connector = intel_connector_alloc(); 7800 if (!intel_connector) 7801 goto err_connector_alloc; 7802 7803 intel_encoder = &intel_dig_port->base; 7804 encoder = &intel_encoder->base; 7805 7806 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 7807 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 7808 "DP %c", port_name(port))) 7809 goto err_encoder_init; 7810 7811 intel_encoder->hotplug = intel_dp_hotplug; 7812 intel_encoder->compute_config = intel_dp_compute_config; 7813 intel_encoder->get_hw_state = intel_dp_get_hw_state; 7814 intel_encoder->get_config = intel_dp_get_config; 7815 intel_encoder->update_pipe = intel_panel_update_backlight; 7816 intel_encoder->suspend = intel_dp_encoder_suspend; 7817 if (IS_CHERRYVIEW(dev_priv)) { 7818 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 7819 intel_encoder->pre_enable = chv_pre_enable_dp; 7820 intel_encoder->enable = vlv_enable_dp; 7821 intel_encoder->disable = vlv_disable_dp; 7822 intel_encoder->post_disable = chv_post_disable_dp; 7823 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 7824 } else if (IS_VALLEYVIEW(dev_priv)) { 7825 intel_encoder->pre_pll_enable = 
vlv_dp_pre_pll_enable; 7826 intel_encoder->pre_enable = vlv_pre_enable_dp; 7827 intel_encoder->enable = vlv_enable_dp; 7828 intel_encoder->disable = vlv_disable_dp; 7829 intel_encoder->post_disable = vlv_post_disable_dp; 7830 } else { 7831 intel_encoder->pre_enable = g4x_pre_enable_dp; 7832 intel_encoder->enable = g4x_enable_dp; 7833 intel_encoder->disable = g4x_disable_dp; 7834 intel_encoder->post_disable = g4x_post_disable_dp; 7835 } 7836 7837 intel_dig_port->dp.output_reg = output_reg; 7838 intel_dig_port->max_lanes = 4; 7839 7840 intel_encoder->type = INTEL_OUTPUT_DP; 7841 intel_encoder->power_domain = intel_port_to_power_domain(port); 7842 if (IS_CHERRYVIEW(dev_priv)) { 7843 if (port == PORT_D) 7844 intel_encoder->pipe_mask = BIT(PIPE_C); 7845 else 7846 intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); 7847 } else { 7848 intel_encoder->pipe_mask = ~0; 7849 } 7850 intel_encoder->cloneable = 0; 7851 intel_encoder->port = port; 7852 7853 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 7854 7855 if (port != PORT_A) 7856 intel_infoframe_init(intel_dig_port); 7857 7858 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 7859 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 7860 goto err_init_connector; 7861 7862 return true; 7863 7864 err_init_connector: 7865 drm_encoder_cleanup(encoder); 7866 err_encoder_init: 7867 kfree(intel_connector); 7868 err_connector_alloc: 7869 kfree(intel_dig_port); 7870 return false; 7871 } 7872 7873 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 7874 { 7875 struct intel_encoder *encoder; 7876 7877 for_each_intel_encoder(&dev_priv->drm, encoder) { 7878 struct intel_dp *intel_dp; 7879 7880 if (encoder->type != INTEL_OUTPUT_DDI) 7881 continue; 7882 7883 intel_dp = enc_to_intel_dp(encoder); 7884 7885 if (!intel_dp->can_mst) 7886 continue; 7887 7888 if (intel_dp->is_mst) 7889 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 7890 } 7891 } 7892 7893 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 7894 { 7895 struct intel_encoder *encoder; 7896 7897 for_each_intel_encoder(&dev_priv->drm, encoder) { 7898 struct intel_dp *intel_dp; 7899 int ret; 7900 7901 if (encoder->type != INTEL_OUTPUT_DDI) 7902 continue; 7903 7904 intel_dp = enc_to_intel_dp(encoder); 7905 7906 if (!intel_dp->can_mst) 7907 continue; 7908 7909 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 7910 true); 7911 if (ret) { 7912 intel_dp->is_mst = false; 7913 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 7914 false); 7915 } 7916 } 7917 } 7918
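/*
 * Illustrative sketch (not part of this file): intel_dp_mst_suspend() and
 * intel_dp_mst_resume() above are meant to bracket system suspend/resume,
 * quiescing the MST topology managers before the links go down and
 * re-synchronizing them afterwards. The wrapper below is hypothetical; in
 * i915 the real calls are made from the driver's suspend/resume paths.
 */
static void example_display_suspend_resume(struct drm_i915_private *dev_priv,
					   bool suspend)
{
	if (suspend) {
		/* Stop MST transaction handling before the links are shut down. */
		intel_dp_mst_suspend(dev_priv);
		/* ... power down the display hardware ... */
	} else {
		/* ... power up the display hardware and restore modes ... */
		/* Re-sync each MST topology; on failure the port falls back to SST. */
		intel_dp_mst_resume(dev_priv);
	}
}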