/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
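/*
 * Note: unlike the CHV table further below, which programs m2 as a
 * fixed-point value, the VLV dividers here use plain integer m2 values.
 */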
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates, excluding the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires us to program a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
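/*
 * Example: with rates = { 162000, 270000, 540000 } and max_rate = 324000,
 * the scan from the top finds 270000 <= 324000 at i = 1 and returns
 * len - i = 2, i.e. only the first two rates remain usable.
 */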
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}
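/*
 * Worked example (illustrative numbers): a 1920x1080@60 mode has a pixel
 * clock of ~148500 kHz, so at 24 bpp intel_dp_link_required() yields
 * 148500 * 24 / 8 = 445500 kB/s, while an HBR2 x4 link provides
 * intel_dp_max_data_rate(540000, 4) = 2160000 kB/s of bandwidth.
 */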
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to a max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported on all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, the max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
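/*
 * Example: since both inputs are sorted ascending, the merge above walks
 * each array once. source = { 162000, 270000, 540000 } intersected with
 * sink = { 162000, 270000, 324000 } yields common = { 162000, 270000 }.
 */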
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
					u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
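/*
 * Worked example (illustrative numbers): a 148500 kHz mode clock becomes
 * 148500 * 1000000 / 972261 ≈ 152736 kHz, i.e. the ~2.85% FEC overhead is
 * folded into the effective pixel rate used for the DSC bpp math below.
 */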
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format so multiply by 16; for Gen 11,
	 * the fractional part is 0.
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
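/*
 * Example (illustrative): a 4k mode with a 594000 kHz pixel clock is below
 * DP_DSC_PEAK_PIXEL_RATE, so min_slice_count = DIV_ROUND_UP(594000, 340000)
 * = 2, which the loop above then matches against the sink's supported
 * slice counts.
 */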
static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}
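/*
 * Example: intel_dp_pack_aux() stores bytes MSB first, so { 0x12, 0x34 }
 * packs to 0x12340000; intel_dp_unpack_aux() below performs the inverse.
 */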
static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), intel_dig_port->base.base.base.id,
		     intel_dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/*
	 * Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
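/*
 * Example: the bitmask below starts as (1 << PIPE_A) | (1 << PIPE_B); if
 * another eDP port already owns the pipe B sequencer, only the PIPE_A bit
 * survives and ffs(pipes) - 1 returns PIPE_A.
 */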
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer assigned yet.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
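/*
 * Note: on GEN9 LP the PPS instance is not tied to a pipe; the function
 * below reuses the VBT backlight controller index as the PPS index, so
 * after a reset only the HW side needs reprogramming.
 */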
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    intel_dig_port->base.base.base.id,
			    intel_dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
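/*
 * Canonical usage of the PPS helpers above (a sketch, not lifted verbatim
 * from a caller in this file):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... access PPS registers ...
 *	}
 *
 * This takes the power domain reference before pps_mutex and drops it
 * after, matching the ordering requirement described below.
 */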
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}
/*
 * Reboot notifier handler to shut down panel power and so guarantee T12
 * timing. Only applicable when the panel PM state is not being tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run
	 * at 2MHz. So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}
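/*
 * Example: with a 24000 kHz hrawclk, g4x_get_aux_clock_divider() above
 * returns DIV_ROUND_CLOSEST(24000, 2000) = 12, giving the desired ~2MHz
 * AUX bit clock.
 */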
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}
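/*
 * intel_dp_aux_xfer() below is the single low-level entry point for all
 * AUX traffic: it grabs the TC port lock and AUX power domain, pins VDD
 * for eDP, waits out any previous transaction, then retries the transfer
 * per the DP spec before unpacking whatever the sink returned.
 */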
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_domain = intel_aux_power_domain(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these.
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so that the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}
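/*
 * AUX request header layout (see intel_dp_aux_header() below): 4 bits of
 * request type plus a 20 bit address make up the 3 "bare address" bytes;
 * a 4th byte carries (size - 1) when a length is needed.
 */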
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
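/*
 * Example: a native read of 16 bytes from DPCD address 0x00200 packs to
 * txbuf[] = { 0x90, 0x02, 0x00, 0x0f }: request 0x9 in the high nibble,
 * the 20 bit address across the remaining nibbles, and size - 1 last.
 */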
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
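/*
 * intel_dp_aux_init() below wires up the per-platform vfuncs defined
 * above: the register layout (g4x/ilk/skl), the clock divider and the
 * SEND_CTL flavour are each picked once, based on the hardware generation.
 */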
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
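/*
 * Example: snprintf_int_array(str, sizeof(str), (int[]){ 162000, 270000 }, 2)
 * produces "162000, 270000", stopping safely if str runs out of space.
 */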
*/ 1854 1855 if (!drm_debug_enabled(DRM_UT_KMS)) 1856 return; 1857 1858 snprintf_int_array(str, sizeof(str), 1859 intel_dp->source_rates, intel_dp->num_source_rates); 1860 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1861 1862 snprintf_int_array(str, sizeof(str), 1863 intel_dp->sink_rates, intel_dp->num_sink_rates); 1864 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1865 1866 snprintf_int_array(str, sizeof(str), 1867 intel_dp->common_rates, intel_dp->num_common_rates); 1868 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1869 } 1870 1871 int 1872 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1873 { 1874 int len; 1875 1876 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1877 if (WARN_ON(len <= 0)) 1878 return 162000; 1879 1880 return intel_dp->common_rates[len - 1]; 1881 } 1882 1883 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1884 { 1885 int i = intel_dp_rate_index(intel_dp->sink_rates, 1886 intel_dp->num_sink_rates, rate); 1887 1888 if (WARN_ON(i < 0)) 1889 i = 0; 1890 1891 return i; 1892 } 1893 1894 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1895 u8 *link_bw, u8 *rate_select) 1896 { 1897 /* eDP 1.4 rate select method. */ 1898 if (intel_dp->use_rate_select) { 1899 *link_bw = 0; 1900 *rate_select = 1901 intel_dp_rate_select(intel_dp, port_clock); 1902 } else { 1903 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1904 *rate_select = 0; 1905 } 1906 } 1907 1908 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1909 const struct intel_crtc_state *pipe_config) 1910 { 1911 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1912 1913 /* On TGL, FEC is supported on all Pipes */ 1914 if (INTEL_GEN(dev_priv) >= 12) 1915 return true; 1916 1917 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) 1918 return true; 1919 1920 return false; 1921 } 1922 1923 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1924 const struct intel_crtc_state *pipe_config) 1925 { 1926 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1927 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1928 } 1929 1930 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1931 const struct intel_crtc_state *crtc_state) 1932 { 1933 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1934 1935 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) 1936 return false; 1937 1938 return intel_dsc_source_support(encoder, crtc_state) && 1939 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1940 } 1941 1942 static int intel_dp_compute_bpp(struct intel_dp *intel_dp, 1943 struct intel_crtc_state *pipe_config) 1944 { 1945 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1946 struct intel_connector *intel_connector = intel_dp->attached_connector; 1947 int bpp, bpc; 1948 1949 bpp = pipe_config->pipe_bpp; 1950 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); 1951 1952 if (bpc > 0) 1953 bpp = min(bpp, 3 * bpc); 1954 1955 if (intel_dp_is_edp(intel_dp)) { 1956 /* Get bpp from VBT only for panels that don't have bpp in EDID */ 1957 if (intel_connector->base.display_info.bpc == 0 && 1958 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 1959 drm_dbg_kms(&dev_priv->drm, 1960 "clamping bpp for eDP panel to BIOS-provided %i\n", 1961 dev_priv->vbt.edp.bpp); 1962 bpp = dev_priv->vbt.edp.bpp; 1963 } 1964 } 1965 1966 return bpp; 1967 } 1968 1969 /* Adjust link config limits based on compliance test requests.
*/ 1970 void 1971 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1972 struct intel_crtc_state *pipe_config, 1973 struct link_config_limits *limits) 1974 { 1975 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1976 1977 /* For DP Compliance we override the computed bpp for the pipe */ 1978 if (intel_dp->compliance.test_data.bpc != 0) { 1979 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1980 1981 limits->min_bpp = limits->max_bpp = bpp; 1982 pipe_config->dither_force_disable = bpp == 6 * 3; 1983 1984 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1985 } 1986 1987 /* Use values requested by Compliance Test Request */ 1988 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1989 int index; 1990 1991 /* Validate the compliance test data since max values 1992 * might have changed due to link train fallback. 1993 */ 1994 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1995 intel_dp->compliance.test_lane_count)) { 1996 index = intel_dp_rate_index(intel_dp->common_rates, 1997 intel_dp->num_common_rates, 1998 intel_dp->compliance.test_link_rate); 1999 if (index >= 0) 2000 limits->min_clock = limits->max_clock = index; 2001 limits->min_lane_count = limits->max_lane_count = 2002 intel_dp->compliance.test_lane_count; 2003 } 2004 } 2005 } 2006 2007 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 2008 { 2009 /* 2010 * The bpp value was computed assuming RGB output. For YCbCr 4:2:0 2011 * output the effective bits per pixel are half the RGB value, 2012 * e.g. 24 bpp RGB corresponds to 12 bpp for 4:2:0. 2013 */ 2014 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2015 bpp /= 2; 2016 2017 return bpp; 2018 } 2019 2020 /* Optimize link config in order: max bpp, min clock, min lanes */ 2021 static int 2022 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2023 struct intel_crtc_state *pipe_config, 2024 const struct link_config_limits *limits) 2025 { 2026 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2027 int bpp, clock, lane_count; 2028 int mode_rate, link_clock, link_avail; 2029 2030 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2031 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2032 2033 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2034 output_bpp); 2035 2036 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2037 for (lane_count = limits->min_lane_count; 2038 lane_count <= limits->max_lane_count; 2039 lane_count <<= 1) { 2040 link_clock = intel_dp->common_rates[clock]; 2041 link_avail = intel_dp_max_data_rate(link_clock, 2042 lane_count); 2043 2044 if (mode_rate <= link_avail) { 2045 pipe_config->lane_count = lane_count; 2046 pipe_config->pipe_bpp = bpp; 2047 pipe_config->port_clock = link_clock; 2048 2049 return 0; 2050 } 2051 } 2052 } 2053 } 2054 2055 return -EINVAL; 2056 } 2057 2058 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2059 { 2060 int i, num_bpc; 2061 u8 dsc_bpc[3] = {0}; 2062 2063 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2064 dsc_bpc); 2065 for (i = 0; i < num_bpc; i++) { 2066 if (dsc_max_bpc >= dsc_bpc[i]) 2067 return dsc_bpc[i] * 3; 2068 } 2069 2070 return 0; 2071 } 2072 2073 #define DSC_SUPPORTED_VERSION_MIN 1 2074 2075 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2076 struct intel_crtc_state *crtc_state) 2077 { 2078 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2079 struct intel_dp *intel_dp
= enc_to_intel_dp(encoder); 2080 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2081 u8 line_buf_depth; 2082 int ret; 2083 2084 ret = intel_dsc_compute_params(encoder, crtc_state); 2085 if (ret) 2086 return ret; 2087 2088 /* 2089 * Slice Height of 8 works for all currently available panels. So start 2090 * with that if pic_height is an integral multiple of 8. Eventually add 2091 * logic to try multiple slice heights. 2092 */ 2093 if (vdsc_cfg->pic_height % 8 == 0) 2094 vdsc_cfg->slice_height = 8; 2095 else if (vdsc_cfg->pic_height % 4 == 0) 2096 vdsc_cfg->slice_height = 4; 2097 else 2098 vdsc_cfg->slice_height = 2; 2099 2100 vdsc_cfg->dsc_version_major = 2101 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2102 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2103 vdsc_cfg->dsc_version_minor = 2104 min(DSC_SUPPORTED_VERSION_MIN, 2105 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2106 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2107 2108 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2109 DP_DSC_RGB; 2110 2111 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2112 if (!line_buf_depth) { 2113 drm_dbg_kms(&i915->drm, 2114 "DSC Sink Line Buffer Depth invalid\n"); 2115 return -EINVAL; 2116 } 2117 2118 if (vdsc_cfg->dsc_version_minor == 2) 2119 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2120 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2121 else 2122 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2123 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2124 2125 vdsc_cfg->block_pred_enable = 2126 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2127 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2128 2129 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2130 } 2131 2132 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2133 struct intel_crtc_state *pipe_config, 2134 struct drm_connector_state *conn_state, 2135 struct link_config_limits *limits) 2136 { 2137 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2138 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2139 const struct drm_display_mode *adjusted_mode = 2140 &pipe_config->hw.adjusted_mode; 2141 u8 dsc_max_bpc; 2142 int pipe_bpp; 2143 int ret; 2144 2145 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2146 intel_dp_supports_fec(intel_dp, pipe_config); 2147 2148 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2149 return -EINVAL; 2150 2151 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2152 if (INTEL_GEN(dev_priv) >= 12) 2153 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2154 else 2155 dsc_max_bpc = min_t(u8, 10, 2156 conn_state->max_requested_bpc); 2157 2158 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2159 2160 /* Min Input BPC for ICL+ is 8 */ 2161 if (pipe_bpp < 8 * 3) { 2162 drm_dbg_kms(&dev_priv->drm, 2163 "No DSC support for less than 8bpc\n"); 2164 return -EINVAL; 2165 } 2166 2167 /* 2168 * For now enable DSC for max bpp, max link rate, max lane count. 2169 * Optimize this later for the minimum possible link rate/lane count 2170 * with DSC enabled for the requested mode. 
2171 */ 2172 pipe_config->pipe_bpp = pipe_bpp; 2173 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2174 pipe_config->lane_count = limits->max_lane_count; 2175 2176 if (intel_dp_is_edp(intel_dp)) { 2177 pipe_config->dsc.compressed_bpp = 2178 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2179 pipe_config->pipe_bpp); 2180 pipe_config->dsc.slice_count = 2181 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2182 true); 2183 } else { 2184 u16 dsc_max_output_bpp; 2185 u8 dsc_dp_slice_count; 2186 2187 dsc_max_output_bpp = 2188 intel_dp_dsc_get_output_bpp(dev_priv, 2189 pipe_config->port_clock, 2190 pipe_config->lane_count, 2191 adjusted_mode->crtc_clock, 2192 adjusted_mode->crtc_hdisplay); 2193 dsc_dp_slice_count = 2194 intel_dp_dsc_get_slice_count(intel_dp, 2195 adjusted_mode->crtc_clock, 2196 adjusted_mode->crtc_hdisplay); 2197 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2198 drm_dbg_kms(&dev_priv->drm, 2199 "Compressed BPP/Slice Count not supported\n"); 2200 return -EINVAL; 2201 } 2202 pipe_config->dsc.compressed_bpp = min_t(u16, 2203 dsc_max_output_bpp >> 4, 2204 pipe_config->pipe_bpp); 2205 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2206 } 2207 /* 2208 * VDSC engine operates at 1 Pixel per clock, so if the peak pixel rate 2209 * is greater than the maximum cdclk and more than one slice is supported 2210 * then we need to use 2 VDSC instances. 2211 */ 2212 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2213 if (pipe_config->dsc.slice_count > 1) { 2214 pipe_config->dsc.dsc_split = true; 2215 } else { 2216 drm_dbg_kms(&dev_priv->drm, 2217 "Cannot split stream to use 2 VDSC instances\n"); 2218 return -EINVAL; 2219 } 2220 } 2221 2222 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2223 if (ret < 0) { 2224 drm_dbg_kms(&dev_priv->drm, 2225 "Cannot compute valid DSC parameters for Input Bpp = %d " 2226 "Compressed BPP = %d\n", 2227 pipe_config->pipe_bpp, 2228 pipe_config->dsc.compressed_bpp); 2229 return ret; 2230 } 2231 2232 pipe_config->dsc.compression_enable = true; 2233 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2234 "Compressed Bpp = %d Slice Count = %d\n", 2235 pipe_config->pipe_bpp, 2236 pipe_config->dsc.compressed_bpp, 2237 pipe_config->dsc.slice_count); 2238 2239 return 0; 2240 } 2241 2242 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2243 { 2244 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2245 return 6 * 3; 2246 else 2247 return 8 * 3; 2248 } 2249 2250 static int 2251 intel_dp_compute_link_config(struct intel_encoder *encoder, 2252 struct intel_crtc_state *pipe_config, 2253 struct drm_connector_state *conn_state) 2254 { 2255 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2256 const struct drm_display_mode *adjusted_mode = 2257 &pipe_config->hw.adjusted_mode; 2258 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2259 struct link_config_limits limits; 2260 int common_len; 2261 int ret; 2262 2263 common_len = intel_dp_common_len_rate_limit(intel_dp, 2264 intel_dp->max_link_rate); 2265 2266 /* No common link rates between source and sink */ 2267 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2268 2269 limits.min_clock = 0; 2270 limits.max_clock = common_len - 1; 2271 2272 limits.min_lane_count = 1; 2273 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2274 2275 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2276 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2277 2278 if (intel_dp_is_edp(intel_dp)) { 2279 /* 2280 * Use
the maximum clock and number of lanes the eDP panel 2281 * advertises being capable of. The panels are generally 2282 * designed to support only a single clock and lane 2283 * configuration, and typically these values correspond to the 2284 * native resolution of the panel. 2285 */ 2286 limits.min_lane_count = limits.max_lane_count; 2287 limits.min_clock = limits.max_clock; 2288 } 2289 2290 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2291 2292 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 2293 "max rate %d max bpp %d pixel clock %iKHz\n", 2294 limits.max_lane_count, 2295 intel_dp->common_rates[limits.max_clock], 2296 limits.max_bpp, adjusted_mode->crtc_clock); 2297 2298 /* 2299 * Optimize for slow and wide. This is the place to add alternative 2300 * optimization policy. 2301 */ 2302 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2303 2304 /* enable compression if the mode doesn't fit available BW */ 2305 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 2306 if (ret || intel_dp->force_dsc_en) { 2307 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2308 conn_state, &limits); 2309 if (ret < 0) 2310 return ret; 2311 } 2312 2313 if (pipe_config->dsc.compression_enable) { 2314 drm_dbg_kms(&i915->drm, 2315 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2316 pipe_config->lane_count, pipe_config->port_clock, 2317 pipe_config->pipe_bpp, 2318 pipe_config->dsc.compressed_bpp); 2319 2320 drm_dbg_kms(&i915->drm, 2321 "DP link rate required %i available %i\n", 2322 intel_dp_link_required(adjusted_mode->crtc_clock, 2323 pipe_config->dsc.compressed_bpp), 2324 intel_dp_max_data_rate(pipe_config->port_clock, 2325 pipe_config->lane_count)); 2326 } else { 2327 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 2328 pipe_config->lane_count, pipe_config->port_clock, 2329 pipe_config->pipe_bpp); 2330 2331 drm_dbg_kms(&i915->drm, 2332 "DP link rate required %i available %i\n", 2333 intel_dp_link_required(adjusted_mode->crtc_clock, 2334 pipe_config->pipe_bpp), 2335 intel_dp_max_data_rate(pipe_config->port_clock, 2336 pipe_config->lane_count)); 2337 } 2338 return 0; 2339 } 2340 2341 static int 2342 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2343 struct intel_crtc_state *crtc_state, 2344 const struct drm_connector_state *conn_state) 2345 { 2346 struct drm_connector *connector = conn_state->connector; 2347 const struct drm_display_info *info = &connector->display_info; 2348 const struct drm_display_mode *adjusted_mode = 2349 &crtc_state->hw.adjusted_mode; 2350 2351 if (!drm_mode_is_420_only(info, adjusted_mode) || 2352 !intel_dp_get_colorimetry_status(intel_dp) || 2353 !connector->ycbcr_420_allowed) 2354 return 0; 2355 2356 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2357 2358 return intel_pch_panel_fitting(crtc_state, conn_state); 2359 } 2360 2361 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2362 const struct drm_connector_state *conn_state) 2363 { 2364 const struct intel_digital_connector_state *intel_conn_state = 2365 to_intel_digital_connector_state(conn_state); 2366 const struct drm_display_mode *adjusted_mode = 2367 &crtc_state->hw.adjusted_mode; 2368 2369 /* 2370 * Our YCbCr output is always limited range. 2371 * crtc_state->limited_color_range only applies to RGB, 2372 * and it must never be set for YCbCr or we risk setting 2373 * some conflicting bits in PIPECONF which will mess up 2374 * the colors on the monitor.
2375 */ 2376 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2377 return false; 2378 2379 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2380 /* 2381 * See: 2382 * CEA-861-E - 5.1 Default Encoding Parameters 2383 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2384 */ 2385 return crtc_state->pipe_bpp != 18 && 2386 drm_default_rgb_quant_range(adjusted_mode) == 2387 HDMI_QUANTIZATION_RANGE_LIMITED; 2388 } else { 2389 return intel_conn_state->broadcast_rgb == 2390 INTEL_BROADCAST_RGB_LIMITED; 2391 } 2392 } 2393 2394 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2395 enum port port) 2396 { 2397 if (IS_G4X(dev_priv)) 2398 return false; 2399 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2400 return false; 2401 2402 return true; 2403 } 2404 2405 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2406 const struct drm_connector_state *conn_state, 2407 struct drm_dp_vsc_sdp *vsc) 2408 { 2409 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2410 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2411 2412 /* 2413 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2414 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2415 * Colorimetry Format indication. 2416 */ 2417 vsc->revision = 0x5; 2418 vsc->length = 0x13; 2419 2420 /* DP 1.4a spec, Table 2-120 */ 2421 switch (crtc_state->output_format) { 2422 case INTEL_OUTPUT_FORMAT_YCBCR444: 2423 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2424 break; 2425 case INTEL_OUTPUT_FORMAT_YCBCR420: 2426 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2427 break; 2428 case INTEL_OUTPUT_FORMAT_RGB: 2429 default: 2430 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2431 } 2432 2433 switch (conn_state->colorspace) { 2434 case DRM_MODE_COLORIMETRY_BT709_YCC: 2435 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2436 break; 2437 case DRM_MODE_COLORIMETRY_XVYCC_601: 2438 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2439 break; 2440 case DRM_MODE_COLORIMETRY_XVYCC_709: 2441 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2442 break; 2443 case DRM_MODE_COLORIMETRY_SYCC_601: 2444 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2445 break; 2446 case DRM_MODE_COLORIMETRY_OPYCC_601: 2447 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2448 break; 2449 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2450 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2451 break; 2452 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2453 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2454 break; 2455 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2456 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2457 break; 2458 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2459 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2460 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2461 break; 2462 default: 2463 /* 2464 * RGB->YCBCR color conversion uses the BT.709 2465 * color space. 
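 * Reporting BT709_YCC below keeps the sink decoding with the same
 * coefficients the pipe used for the RGB->YCbCr conversion.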
2466 */ 2467 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2468 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2469 else 2470 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2471 break; 2472 } 2473 2474 vsc->bpc = crtc_state->pipe_bpp / 3; 2475 2476 /* only RGB pixelformat supports 6 bpc */ 2477 drm_WARN_ON(&dev_priv->drm, 2478 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2479 2480 /* all YCbCr are always limited range */ 2481 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2482 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2483 } 2484 2485 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2486 struct intel_crtc_state *crtc_state, 2487 const struct drm_connector_state *conn_state) 2488 { 2489 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 2490 2491 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 2492 if (crtc_state->has_psr) 2493 return; 2494 2495 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 2496 return; 2497 2498 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2499 vsc->sdp_type = DP_SDP_VSC; 2500 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2501 &crtc_state->infoframes.vsc); 2502 } 2503 2504 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 2505 const struct intel_crtc_state *crtc_state, 2506 const struct drm_connector_state *conn_state, 2507 struct drm_dp_vsc_sdp *vsc) 2508 { 2509 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2510 2511 vsc->sdp_type = DP_SDP_VSC; 2512 2513 if (dev_priv->psr.psr2_enabled) { 2514 if (dev_priv->psr.colorimetry_support && 2515 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2516 /* [PSR2, +Colorimetry] */ 2517 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2518 vsc); 2519 } else { 2520 /* 2521 * [PSR2, -Colorimetry] 2522 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2523 * 3D stereo + PSR/PSR2 + Y-coordinate. 2524 */ 2525 vsc->revision = 0x4; 2526 vsc->length = 0xe; 2527 } 2528 } else { 2529 /* 2530 * [PSR1] 2531 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2532 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2533 * higher). 
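 *
 * Summary of the VSC SDP variants chosen in this function:
 *	PSR1:                  revision 0x2, length 0x8
 *	PSR2 w/o colorimetry:  revision 0x4, length 0xe
 *	PSR2 with colorimetry: revision 0x5, length 0x13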
2534 */ 2535 vsc->revision = 0x2; 2536 vsc->length = 0x8; 2537 } 2538 } 2539 2540 static void 2541 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2542 struct intel_crtc_state *crtc_state, 2543 const struct drm_connector_state *conn_state) 2544 { 2545 int ret; 2546 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2547 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2548 2549 if (!conn_state->hdr_output_metadata) 2550 return; 2551 2552 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2553 2554 if (ret) { 2555 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2556 return; 2557 } 2558 2559 crtc_state->infoframes.enable |= 2560 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2561 } 2562 2563 int 2564 intel_dp_compute_config(struct intel_encoder *encoder, 2565 struct intel_crtc_state *pipe_config, 2566 struct drm_connector_state *conn_state) 2567 { 2568 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2569 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2570 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2571 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2572 enum port port = encoder->port; 2573 struct intel_connector *intel_connector = intel_dp->attached_connector; 2574 struct intel_digital_connector_state *intel_conn_state = 2575 to_intel_digital_connector_state(conn_state); 2576 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2577 DP_DPCD_QUIRK_CONSTANT_N); 2578 int ret = 0, output_bpp; 2579 2580 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2581 pipe_config->has_pch_encoder = true; 2582 2583 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2584 2585 if (lspcon->active) 2586 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2587 else 2588 ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, 2589 conn_state); 2590 if (ret) 2591 return ret; 2592 2593 pipe_config->has_drrs = false; 2594 if (!intel_dp_port_has_audio(dev_priv, port)) 2595 pipe_config->has_audio = false; 2596 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2597 pipe_config->has_audio = intel_dp->has_audio; 2598 else 2599 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2600 2601 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2602 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2603 adjusted_mode); 2604 2605 if (HAS_GMCH(dev_priv)) 2606 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 2607 else 2608 ret = intel_pch_panel_fitting(pipe_config, conn_state); 2609 if (ret) 2610 return ret; 2611 } 2612 2613 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2614 return -EINVAL; 2615 2616 if (HAS_GMCH(dev_priv) && 2617 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2618 return -EINVAL; 2619 2620 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2621 return -EINVAL; 2622 2623 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2624 return -EINVAL; 2625 2626 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2627 if (ret < 0) 2628 return ret; 2629 2630 pipe_config->limited_color_range = 2631 intel_dp_limited_color_range(pipe_config, conn_state); 2632 2633 if (pipe_config->dsc.compression_enable) 2634 output_bpp = pipe_config->dsc.compressed_bpp; 2635 else 2636 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2637 2638 intel_link_compute_m_n(output_bpp, 2639 pipe_config->lane_count, 2640 
adjusted_mode->crtc_clock, 2641 pipe_config->port_clock, 2642 &pipe_config->dp_m_n, 2643 constant_n, pipe_config->fec_enable); 2644 2645 if (intel_connector->panel.downclock_mode != NULL && 2646 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2647 pipe_config->has_drrs = true; 2648 intel_link_compute_m_n(output_bpp, 2649 pipe_config->lane_count, 2650 intel_connector->panel.downclock_mode->clock, 2651 pipe_config->port_clock, 2652 &pipe_config->dp_m2_n2, 2653 constant_n, pipe_config->fec_enable); 2654 } 2655 2656 if (!HAS_DDI(dev_priv)) 2657 intel_dp_set_clock(encoder, pipe_config); 2658 2659 intel_psr_compute_config(intel_dp, pipe_config); 2660 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2661 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2662 2663 return 0; 2664 } 2665 2666 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2667 int link_rate, u8 lane_count, 2668 bool link_mst) 2669 { 2670 intel_dp->link_trained = false; 2671 intel_dp->link_rate = link_rate; 2672 intel_dp->lane_count = lane_count; 2673 intel_dp->link_mst = link_mst; 2674 } 2675 2676 static void intel_dp_prepare(struct intel_encoder *encoder, 2677 const struct intel_crtc_state *pipe_config) 2678 { 2679 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2680 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2681 enum port port = encoder->port; 2682 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2683 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2684 2685 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2686 pipe_config->lane_count, 2687 intel_crtc_has_type(pipe_config, 2688 INTEL_OUTPUT_DP_MST)); 2689 2690 /* 2691 * There are four kinds of DP registers: 2692 * 2693 * IBX PCH 2694 * SNB CPU 2695 * IVB CPU 2696 * CPT PCH 2697 * 2698 * IBX PCH and CPU are the same for almost everything, 2699 * except that the CPU DP PLL is configured in this 2700 * register 2701 * 2702 * CPT PCH is quite different, having many bits moved 2703 * to the TRANS_DP_CTL register instead. That 2704 * configuration happens (oddly) in ilk_pch_enable 2705 */ 2706 2707 /* Preserve the BIOS-computed detected bit. This is 2708 * supposed to be read-only. 
2709 */ 2710 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2711 2712 /* Handle DP bits in common between all three register formats */ 2713 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2714 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2715 2716 /* Split out the IBX/CPU vs CPT settings */ 2717 2718 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2719 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2720 intel_dp->DP |= DP_SYNC_HS_HIGH; 2721 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2722 intel_dp->DP |= DP_SYNC_VS_HIGH; 2723 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2724 2725 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2726 intel_dp->DP |= DP_ENHANCED_FRAMING; 2727 2728 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2729 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2730 u32 trans_dp; 2731 2732 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2733 2734 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2735 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2736 trans_dp |= TRANS_DP_ENH_FRAMING; 2737 else 2738 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2739 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2740 } else { 2741 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2742 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2743 2744 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2745 intel_dp->DP |= DP_SYNC_HS_HIGH; 2746 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2747 intel_dp->DP |= DP_SYNC_VS_HIGH; 2748 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2749 2750 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2751 intel_dp->DP |= DP_ENHANCED_FRAMING; 2752 2753 if (IS_CHERRYVIEW(dev_priv)) 2754 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2755 else 2756 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2757 } 2758 } 2759 2760 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2761 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2762 2763 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2764 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2765 2766 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2767 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2768 2769 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2770 2771 static void wait_panel_status(struct intel_dp *intel_dp, 2772 u32 mask, 2773 u32 value) 2774 { 2775 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2776 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2777 2778 lockdep_assert_held(&dev_priv->pps_mutex); 2779 2780 intel_pps_verify_state(intel_dp); 2781 2782 pp_stat_reg = _pp_stat_reg(intel_dp); 2783 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2784 2785 drm_dbg_kms(&dev_priv->drm, 2786 "mask %08x value %08x status %08x control %08x\n", 2787 mask, value, 2788 intel_de_read(dev_priv, pp_stat_reg), 2789 intel_de_read(dev_priv, pp_ctrl_reg)); 2790 2791 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2792 mask, value, 5000)) 2793 drm_err(&dev_priv->drm, 2794 "Panel status timeout: status %08x control %08x\n", 2795 intel_de_read(dev_priv, pp_stat_reg), 2796 intel_de_read(dev_priv, pp_ctrl_reg)); 2797 2798 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2799 } 2800 2801 static void wait_panel_on(struct intel_dp *intel_dp) 2802 { 2803 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2804 2805 drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); 2806 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2807 } 2808 
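/*
 * Like wait_panel_on() above, the helpers below poll PP_STATUS until the
 * power sequencer reaches the state encoded by the corresponding
 * IDLE_*_MASK/IDLE_*_VALUE pair, e.g. IDLE_OFF_VALUE means "panel off,
 * no power sequence in progress".
 */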
2809 static void wait_panel_off(struct intel_dp *intel_dp) 2810 { 2811 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2812 2813 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n"); 2814 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2815 } 2816 2817 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2818 { 2819 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2820 ktime_t panel_power_on_time; 2821 s64 panel_power_off_duration; 2822 2823 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n"); 2824 2825 /* Take the difference of current time and panel power off time, 2826 * and then make the panel wait for t11_t12 if needed. */ 2827 panel_power_on_time = ktime_get_boottime(); 2828 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2829 2830 /* When we disable the VDD override bit last we have to do the manual 2831 * wait. */ 2832 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2833 wait_remaining_ms_from_jiffies(jiffies, 2834 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2835 2836 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2837 } 2838 2839 static void wait_backlight_on(struct intel_dp *intel_dp) 2840 { 2841 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2842 intel_dp->backlight_on_delay); 2843 } 2844 2845 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2846 { 2847 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2848 intel_dp->backlight_off_delay); 2849 } 2850 2851 /* Read the current pp_control value, unlocking the register if it 2852 * is locked 2853 */ 2854 2855 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2856 { 2857 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2858 u32 control; 2859 2860 lockdep_assert_held(&dev_priv->pps_mutex); 2861 2862 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 2863 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 2864 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2865 control &= ~PANEL_UNLOCK_MASK; 2866 control |= PANEL_UNLOCK_REGS; 2867 } 2868 return control; 2869 } 2870 2871 /* 2872 * Must be paired with edp_panel_vdd_off(). 2873 * Must hold pps_mutex around the whole on/off sequence. 2874 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
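 *
 * Illustrative pairing under pps_mutex (a sketch, not a verbatim call site):
 *
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... AUX transfers / PPS register accesses ...
 *	edp_panel_vdd_off(intel_dp, false);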
2875 */ 2876 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2877 { 2878 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2879 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2880 u32 pp; 2881 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2882 bool need_to_disable = !intel_dp->want_panel_vdd; 2883 2884 lockdep_assert_held(&dev_priv->pps_mutex); 2885 2886 if (!intel_dp_is_edp(intel_dp)) 2887 return false; 2888 2889 cancel_delayed_work(&intel_dp->panel_vdd_work); 2890 intel_dp->want_panel_vdd = true; 2891 2892 if (edp_have_panel_vdd(intel_dp)) 2893 return need_to_disable; 2894 2895 intel_display_power_get(dev_priv, 2896 intel_aux_power_domain(intel_dig_port)); 2897 2898 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 2899 intel_dig_port->base.base.base.id, 2900 intel_dig_port->base.base.name); 2901 2902 if (!edp_have_panel_power(intel_dp)) 2903 wait_panel_power_cycle(intel_dp); 2904 2905 pp = ilk_get_pp_control(intel_dp); 2906 pp |= EDP_FORCE_VDD; 2907 2908 pp_stat_reg = _pp_stat_reg(intel_dp); 2909 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2910 2911 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2912 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2913 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2914 intel_de_read(dev_priv, pp_stat_reg), 2915 intel_de_read(dev_priv, pp_ctrl_reg)); 2916 /* 2917 * If the panel wasn't on, delay before accessing aux channel 2918 */ 2919 if (!edp_have_panel_power(intel_dp)) { 2920 drm_dbg_kms(&dev_priv->drm, 2921 "[ENCODER:%d:%s] panel power wasn't enabled\n", 2922 intel_dig_port->base.base.base.id, 2923 intel_dig_port->base.base.name); 2924 msleep(intel_dp->panel_power_up_delay); 2925 } 2926 2927 return need_to_disable; 2928 } 2929 2930 /* 2931 * Must be paired with intel_edp_panel_vdd_off() or 2932 * intel_edp_panel_off(). 2933 * Nested calls to these functions are not allowed since 2934 * we drop the lock. Caller must use some higher level 2935 * locking to prevent nested calls from other threads. 
2936 */ 2937 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2938 { 2939 intel_wakeref_t wakeref; 2940 bool vdd; 2941 2942 if (!intel_dp_is_edp(intel_dp)) 2943 return; 2944 2945 vdd = false; 2946 with_pps_lock(intel_dp, wakeref) 2947 vdd = edp_panel_vdd_on(intel_dp); 2948 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2949 dp_to_dig_port(intel_dp)->base.base.base.id, 2950 dp_to_dig_port(intel_dp)->base.base.name); 2951 } 2952 2953 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2954 { 2955 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2956 struct intel_digital_port *intel_dig_port = 2957 dp_to_dig_port(intel_dp); 2958 u32 pp; 2959 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2960 2961 lockdep_assert_held(&dev_priv->pps_mutex); 2962 2963 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 2964 2965 if (!edp_have_panel_vdd(intel_dp)) 2966 return; 2967 2968 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 2969 intel_dig_port->base.base.base.id, 2970 intel_dig_port->base.base.name); 2971 2972 pp = ilk_get_pp_control(intel_dp); 2973 pp &= ~EDP_FORCE_VDD; 2974 2975 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2976 pp_stat_reg = _pp_stat_reg(intel_dp); 2977 2978 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2979 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2980 2981 /* Make sure sequencer is idle before allowing subsequent activity */ 2982 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2983 intel_de_read(dev_priv, pp_stat_reg), 2984 intel_de_read(dev_priv, pp_ctrl_reg)); 2985 2986 if ((pp & PANEL_POWER_ON) == 0) 2987 intel_dp->panel_power_off_time = ktime_get_boottime(); 2988 2989 intel_display_power_put_unchecked(dev_priv, 2990 intel_aux_power_domain(intel_dig_port)); 2991 } 2992 2993 static void edp_panel_vdd_work(struct work_struct *__work) 2994 { 2995 struct intel_dp *intel_dp = 2996 container_of(to_delayed_work(__work), 2997 struct intel_dp, panel_vdd_work); 2998 intel_wakeref_t wakeref; 2999 3000 with_pps_lock(intel_dp, wakeref) { 3001 if (!intel_dp->want_panel_vdd) 3002 edp_panel_vdd_off_sync(intel_dp); 3003 } 3004 } 3005 3006 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 3007 { 3008 unsigned long delay; 3009 3010 /* 3011 * Queue the timer to fire a long time from now (relative to the power 3012 * down delay) to keep the panel power up across a sequence of 3013 * operations. 3014 */ 3015 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 3016 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 3017 } 3018 3019 /* 3020 * Must be paired with edp_panel_vdd_on(). 3021 * Must hold pps_mutex around the whole on/off sequence. 3022 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
3023 */ 3024 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 3025 { 3026 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3027 3028 lockdep_assert_held(&dev_priv->pps_mutex); 3029 3030 if (!intel_dp_is_edp(intel_dp)) 3031 return; 3032 3033 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 3034 dp_to_dig_port(intel_dp)->base.base.base.id, 3035 dp_to_dig_port(intel_dp)->base.base.name); 3036 3037 intel_dp->want_panel_vdd = false; 3038 3039 if (sync) 3040 edp_panel_vdd_off_sync(intel_dp); 3041 else 3042 edp_panel_vdd_schedule_off(intel_dp); 3043 } 3044 3045 static void edp_panel_on(struct intel_dp *intel_dp) 3046 { 3047 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3048 u32 pp; 3049 i915_reg_t pp_ctrl_reg; 3050 3051 lockdep_assert_held(&dev_priv->pps_mutex); 3052 3053 if (!intel_dp_is_edp(intel_dp)) 3054 return; 3055 3056 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 3057 dp_to_dig_port(intel_dp)->base.base.base.id, 3058 dp_to_dig_port(intel_dp)->base.base.name); 3059 3060 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 3061 "[ENCODER:%d:%s] panel power already on\n", 3062 dp_to_dig_port(intel_dp)->base.base.base.id, 3063 dp_to_dig_port(intel_dp)->base.base.name)) 3064 return; 3065 3066 wait_panel_power_cycle(intel_dp); 3067 3068 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3069 pp = ilk_get_pp_control(intel_dp); 3070 if (IS_GEN(dev_priv, 5)) { 3071 /* ILK workaround: disable reset around power sequence */ 3072 pp &= ~PANEL_POWER_RESET; 3073 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3074 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3075 } 3076 3077 pp |= PANEL_POWER_ON; 3078 if (!IS_GEN(dev_priv, 5)) 3079 pp |= PANEL_POWER_RESET; 3080 3081 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3082 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3083 3084 wait_panel_on(intel_dp); 3085 intel_dp->last_power_on = jiffies; 3086 3087 if (IS_GEN(dev_priv, 5)) { 3088 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 3089 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3090 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3091 } 3092 } 3093 3094 void intel_edp_panel_on(struct intel_dp *intel_dp) 3095 { 3096 intel_wakeref_t wakeref; 3097 3098 if (!intel_dp_is_edp(intel_dp)) 3099 return; 3100 3101 with_pps_lock(intel_dp, wakeref) 3102 edp_panel_on(intel_dp); 3103 } 3104 3105 3106 static void edp_panel_off(struct intel_dp *intel_dp) 3107 { 3108 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3109 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3110 u32 pp; 3111 i915_reg_t pp_ctrl_reg; 3112 3113 lockdep_assert_held(&dev_priv->pps_mutex); 3114 3115 if (!intel_dp_is_edp(intel_dp)) 3116 return; 3117 3118 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 3119 dig_port->base.base.base.id, dig_port->base.base.name); 3120 3121 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 3122 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 3123 dig_port->base.base.base.id, dig_port->base.base.name); 3124 3125 pp = ilk_get_pp_control(intel_dp); 3126 /* We need to switch off panel power _and_ force vdd, for otherwise some 3127 * panels get very unhappy and cease to work. 
*/ 3128 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 3129 EDP_BLC_ENABLE); 3130 3131 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3132 3133 intel_dp->want_panel_vdd = false; 3134 3135 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3136 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3137 3138 wait_panel_off(intel_dp); 3139 intel_dp->panel_power_off_time = ktime_get_boottime(); 3140 3141 /* We got a reference when we enabled the VDD. */ 3142 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3143 } 3144 3145 void intel_edp_panel_off(struct intel_dp *intel_dp) 3146 { 3147 intel_wakeref_t wakeref; 3148 3149 if (!intel_dp_is_edp(intel_dp)) 3150 return; 3151 3152 with_pps_lock(intel_dp, wakeref) 3153 edp_panel_off(intel_dp); 3154 } 3155 3156 /* Enable backlight in the panel power control. */ 3157 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 3158 { 3159 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3160 intel_wakeref_t wakeref; 3161 3162 /* 3163 * If we enable the backlight right away following a panel power 3164 * on, we may see slight flicker as the panel syncs with the eDP 3165 * link. So delay a bit to make sure the image is solid before 3166 * allowing it to appear. 3167 */ 3168 wait_backlight_on(intel_dp); 3169 3170 with_pps_lock(intel_dp, wakeref) { 3171 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3172 u32 pp; 3173 3174 pp = ilk_get_pp_control(intel_dp); 3175 pp |= EDP_BLC_ENABLE; 3176 3177 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3178 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3179 } 3180 } 3181 3182 /* Enable backlight PWM and backlight PP control. */ 3183 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3184 const struct drm_connector_state *conn_state) 3185 { 3186 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3187 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3188 3189 if (!intel_dp_is_edp(intel_dp)) 3190 return; 3191 3192 drm_dbg_kms(&i915->drm, "\n"); 3193 3194 intel_panel_enable_backlight(crtc_state, conn_state); 3195 _intel_edp_backlight_on(intel_dp); 3196 } 3197 3198 /* Disable backlight in the panel power control. */ 3199 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3200 { 3201 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3202 intel_wakeref_t wakeref; 3203 3204 if (!intel_dp_is_edp(intel_dp)) 3205 return; 3206 3207 with_pps_lock(intel_dp, wakeref) { 3208 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3209 u32 pp; 3210 3211 pp = ilk_get_pp_control(intel_dp); 3212 pp &= ~EDP_BLC_ENABLE; 3213 3214 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3215 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3216 } 3217 3218 intel_dp->last_backlight_off = jiffies; 3219 edp_wait_backlight_off(intel_dp); 3220 } 3221 3222 /* Disable backlight PP control and backlight PWM. */ 3223 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3224 { 3225 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3226 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3227 3228 if (!intel_dp_is_edp(intel_dp)) 3229 return; 3230 3231 drm_dbg_kms(&i915->drm, "\n"); 3232 3233 _intel_edp_backlight_off(intel_dp); 3234 intel_panel_disable_backlight(old_conn_state); 3235 } 3236 3237 /* 3238 * Hook for controlling the panel power control backlight through the bl_power 3239 * sysfs attribute. Take care to handle multiple calls. 
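 * (it reads the current EDP_BLC_ENABLE state first and returns early
 * when there is nothing to change).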
3240 */ 3241 static void intel_edp_backlight_power(struct intel_connector *connector, 3242 bool enable) 3243 { 3244 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3245 struct intel_dp *intel_dp = intel_attached_dp(connector); 3246 intel_wakeref_t wakeref; 3247 bool is_enabled; 3248 3249 is_enabled = false; 3250 with_pps_lock(intel_dp, wakeref) 3251 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3252 if (is_enabled == enable) 3253 return; 3254 3255 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", 3256 enable ? "enable" : "disable"); 3257 3258 if (enable) 3259 _intel_edp_backlight_on(intel_dp); 3260 else 3261 _intel_edp_backlight_off(intel_dp); 3262 } 3263 3264 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3265 { 3266 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3267 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3268 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3269 3270 I915_STATE_WARN(cur_state != state, 3271 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3272 dig_port->base.base.base.id, dig_port->base.base.name, 3273 onoff(state), onoff(cur_state)); 3274 } 3275 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3276 3277 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3278 { 3279 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3280 3281 I915_STATE_WARN(cur_state != state, 3282 "eDP PLL state assertion failure (expected %s, current %s)\n", 3283 onoff(state), onoff(cur_state)); 3284 } 3285 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3286 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3287 3288 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3289 const struct intel_crtc_state *pipe_config) 3290 { 3291 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3292 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3293 3294 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3295 assert_dp_port_disabled(intel_dp); 3296 assert_edp_pll_disabled(dev_priv); 3297 3298 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3299 pipe_config->port_clock); 3300 3301 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3302 3303 if (pipe_config->port_clock == 162000) 3304 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3305 else 3306 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3307 3308 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3309 intel_de_posting_read(dev_priv, DP_A); 3310 udelay(500); 3311 3312 /* 3313 * [DevILK] Work around required when enabling DP PLL 3314 * while a pipe is enabled going to FDI: 3315 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3316 * 2. 
Program DP PLL enable 3317 */ 3318 if (IS_GEN(dev_priv, 5)) 3319 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3320 3321 intel_dp->DP |= DP_PLL_ENABLE; 3322 3323 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3324 intel_de_posting_read(dev_priv, DP_A); 3325 udelay(200); 3326 } 3327 3328 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3329 const struct intel_crtc_state *old_crtc_state) 3330 { 3331 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3332 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3333 3334 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3335 assert_dp_port_disabled(intel_dp); 3336 assert_edp_pll_enabled(dev_priv); 3337 3338 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3339 3340 intel_dp->DP &= ~DP_PLL_ENABLE; 3341 3342 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3343 intel_de_posting_read(dev_priv, DP_A); 3344 udelay(200); 3345 } 3346 3347 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3348 { 3349 /* 3350 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3351 * be capable of signalling downstream hpd with a long pulse. 3352 * Whether or not that means D3 is safe to use is not clear, 3353 * but let's assume so until proven otherwise. 3354 * 3355 * FIXME should really check all downstream ports... 3356 */ 3357 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3358 drm_dp_is_branch(intel_dp->dpcd) && 3359 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3360 } 3361 3362 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3363 const struct intel_crtc_state *crtc_state, 3364 bool enable) 3365 { 3366 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3367 int ret; 3368 3369 if (!crtc_state->dsc.compression_enable) 3370 return; 3371 3372 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3373 enable ? DP_DECOMPRESSION_EN : 0); 3374 if (ret < 0) 3375 drm_dbg_kms(&i915->drm, 3376 "Failed to %s sink decompression state\n", 3377 enable ? "enable" : "disable"); 3378 } 3379 3380 /* If the sink supports it, try to set the power state appropriately */ 3381 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3382 { 3383 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3384 int ret, i; 3385 3386 /* Should have a valid DPCD by this point */ 3387 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3388 return; 3389 3390 if (mode != DRM_MODE_DPMS_ON) { 3391 if (downstream_hpd_needs_d0(intel_dp)) 3392 return; 3393 3394 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3395 DP_SET_POWER_D3); 3396 } else { 3397 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3398 3399 /* 3400 * When turning on, we need to retry for 1ms to give the sink 3401 * time to wake up. 3402 */ 3403 for (i = 0; i < 3; i++) { 3404 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3405 DP_SET_POWER_D0); 3406 if (ret == 1) 3407 break; 3408 msleep(1); 3409 } 3410 3411 if (ret == 1 && lspcon->active) 3412 lspcon_wait_pcon_mode(lspcon); 3413 } 3414 3415 if (ret != 1) 3416 drm_dbg_kms(&i915->drm, "failed to %s sink power state\n", 3417 mode == DRM_MODE_DPMS_ON ? 
"enable" : "disable"); 3418 } 3419 3420 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3421 enum port port, enum pipe *pipe) 3422 { 3423 enum pipe p; 3424 3425 for_each_pipe(dev_priv, p) { 3426 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3427 3428 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3429 *pipe = p; 3430 return true; 3431 } 3432 } 3433 3434 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3435 port_name(port)); 3436 3437 /* must initialize pipe to something for the asserts */ 3438 *pipe = PIPE_A; 3439 3440 return false; 3441 } 3442 3443 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3444 i915_reg_t dp_reg, enum port port, 3445 enum pipe *pipe) 3446 { 3447 bool ret; 3448 u32 val; 3449 3450 val = intel_de_read(dev_priv, dp_reg); 3451 3452 ret = val & DP_PORT_EN; 3453 3454 /* asserts want to know the pipe even if the port is disabled */ 3455 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3456 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3457 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3458 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3459 else if (IS_CHERRYVIEW(dev_priv)) 3460 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3461 else 3462 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3463 3464 return ret; 3465 } 3466 3467 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3468 enum pipe *pipe) 3469 { 3470 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3471 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3472 intel_wakeref_t wakeref; 3473 bool ret; 3474 3475 wakeref = intel_display_power_get_if_enabled(dev_priv, 3476 encoder->power_domain); 3477 if (!wakeref) 3478 return false; 3479 3480 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3481 encoder->port, pipe); 3482 3483 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3484 3485 return ret; 3486 } 3487 3488 static void intel_dp_get_config(struct intel_encoder *encoder, 3489 struct intel_crtc_state *pipe_config) 3490 { 3491 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3492 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3493 u32 tmp, flags = 0; 3494 enum port port = encoder->port; 3495 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3496 3497 if (encoder->type == INTEL_OUTPUT_EDP) 3498 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3499 else 3500 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3501 3502 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3503 3504 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3505 3506 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3507 u32 trans_dp = intel_de_read(dev_priv, 3508 TRANS_DP_CTL(crtc->pipe)); 3509 3510 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3511 flags |= DRM_MODE_FLAG_PHSYNC; 3512 else 3513 flags |= DRM_MODE_FLAG_NHSYNC; 3514 3515 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3516 flags |= DRM_MODE_FLAG_PVSYNC; 3517 else 3518 flags |= DRM_MODE_FLAG_NVSYNC; 3519 } else { 3520 if (tmp & DP_SYNC_HS_HIGH) 3521 flags |= DRM_MODE_FLAG_PHSYNC; 3522 else 3523 flags |= DRM_MODE_FLAG_NHSYNC; 3524 3525 if (tmp & DP_SYNC_VS_HIGH) 3526 flags |= DRM_MODE_FLAG_PVSYNC; 3527 else 3528 flags |= DRM_MODE_FLAG_NVSYNC; 3529 } 3530 3531 pipe_config->hw.adjusted_mode.flags |= flags; 3532 3533 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3534 pipe_config->limited_color_range = true; 3535 3536 pipe_config->lane_count = 3537 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3538 3539 intel_dp_get_m_n(crtc, pipe_config); 3540 3541 if (port == PORT_A) { 3542 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3543 pipe_config->port_clock = 162000; 3544 else 3545 pipe_config->port_clock = 270000; 3546 } 3547 3548 pipe_config->hw.adjusted_mode.crtc_clock = 3549 intel_dotclock_calculate(pipe_config->port_clock, 3550 &pipe_config->dp_m_n); 3551 3552 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3553 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3554 /* 3555 * This is a big fat ugly hack. 3556 * 3557 * Some machines in UEFI boot mode provide us a VBT that has 18 3558 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3559 * unknown we fail to light up. Yet the same BIOS boots up with 3560 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3561 * max, not what it tells us to use. 3562 * 3563 * Note: This will still be broken if the eDP panel is not lit 3564 * up by the BIOS, and thus we can't get the mode at module 3565 * load. 3566 */ 3567 drm_dbg_kms(&dev_priv->drm, 3568 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3569 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3570 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3571 } 3572 } 3573 3574 static void intel_disable_dp(struct intel_atomic_state *state, 3575 struct intel_encoder *encoder, 3576 const struct intel_crtc_state *old_crtc_state, 3577 const struct drm_connector_state *old_conn_state) 3578 { 3579 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3580 3581 intel_dp->link_trained = false; 3582 3583 if (old_crtc_state->has_audio) 3584 intel_audio_codec_disable(encoder, 3585 old_crtc_state, old_conn_state); 3586 3587 /* Make sure the panel is off before trying to change the mode. But also 3588 * ensure that we have vdd while we switch off the panel. */ 3589 intel_edp_panel_vdd_on(intel_dp); 3590 intel_edp_backlight_off(old_conn_state); 3591 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3592 intel_edp_panel_off(intel_dp); 3593 } 3594 3595 static void g4x_disable_dp(struct intel_atomic_state *state, 3596 struct intel_encoder *encoder, 3597 const struct intel_crtc_state *old_crtc_state, 3598 const struct drm_connector_state *old_conn_state) 3599 { 3600 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3601 } 3602 3603 static void vlv_disable_dp(struct intel_atomic_state *state, 3604 struct intel_encoder *encoder, 3605 const struct intel_crtc_state *old_crtc_state, 3606 const struct drm_connector_state *old_conn_state) 3607 { 3608 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3609 } 3610 3611 static void g4x_post_disable_dp(struct intel_atomic_state *state, 3612 struct intel_encoder *encoder, 3613 const struct intel_crtc_state *old_crtc_state, 3614 const struct drm_connector_state *old_conn_state) 3615 { 3616 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3617 enum port port = encoder->port; 3618 3619 /* 3620 * Bspec does not list a specific disable sequence for g4x DP. 3621 * Follow the ilk+ sequence (disable pipe before the port) for 3622 * g4x DP as it does not suffer from underruns like the normal 3623 * g4x modeset sequence (disable pipe after the port). 
3624 */ 3625 intel_dp_link_down(encoder, old_crtc_state); 3626 3627 /* Only ilk+ has port A */ 3628 if (port == PORT_A) 3629 ilk_edp_pll_off(intel_dp, old_crtc_state); 3630 } 3631 3632 static void vlv_post_disable_dp(struct intel_atomic_state *state, 3633 struct intel_encoder *encoder, 3634 const struct intel_crtc_state *old_crtc_state, 3635 const struct drm_connector_state *old_conn_state) 3636 { 3637 intel_dp_link_down(encoder, old_crtc_state); 3638 } 3639 3640 static void chv_post_disable_dp(struct intel_atomic_state *state, 3641 struct intel_encoder *encoder, 3642 const struct intel_crtc_state *old_crtc_state, 3643 const struct drm_connector_state *old_conn_state) 3644 { 3645 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3646 3647 intel_dp_link_down(encoder, old_crtc_state); 3648 3649 vlv_dpio_get(dev_priv); 3650 3651 /* Assert data lane reset */ 3652 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3653 3654 vlv_dpio_put(dev_priv); 3655 } 3656 3657 static void 3658 cpt_set_link_train(struct intel_dp *intel_dp, 3659 u8 dp_train_pat) 3660 { 3661 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3662 u32 *DP = &intel_dp->DP; 3663 3664 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3665 3666 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3667 case DP_TRAINING_PATTERN_DISABLE: 3668 *DP |= DP_LINK_TRAIN_OFF_CPT; 3669 break; 3670 case DP_TRAINING_PATTERN_1: 3671 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3672 break; 3673 case DP_TRAINING_PATTERN_2: 3674 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3675 break; 3676 case DP_TRAINING_PATTERN_3: 3677 drm_dbg_kms(&dev_priv->drm, 3678 "TPS3 not supported, using TPS2 instead\n"); 3679 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3680 break; 3681 } 3682 3683 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3684 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3685 } 3686 3687 static void 3688 g4x_set_link_train(struct intel_dp *intel_dp, 3689 u8 dp_train_pat) 3690 { 3691 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3692 u32 *DP = &intel_dp->DP; 3693 3694 *DP &= ~DP_LINK_TRAIN_MASK; 3695 3696 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3697 case DP_TRAINING_PATTERN_DISABLE: 3698 *DP |= DP_LINK_TRAIN_OFF; 3699 break; 3700 case DP_TRAINING_PATTERN_1: 3701 *DP |= DP_LINK_TRAIN_PAT_1; 3702 break; 3703 case DP_TRAINING_PATTERN_2: 3704 *DP |= DP_LINK_TRAIN_PAT_2; 3705 break; 3706 case DP_TRAINING_PATTERN_3: 3707 drm_dbg_kms(&dev_priv->drm, 3708 "TPS3 not supported, using TPS2 instead\n"); 3709 *DP |= DP_LINK_TRAIN_PAT_2; 3710 break; 3711 } 3712 3713 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3714 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3715 } 3716 3717 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3718 const struct intel_crtc_state *old_crtc_state) 3719 { 3720 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3721 3722 /* enable with pattern 1 (as per spec) */ 3723 3724 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3725 3726 /* 3727 * Magic for VLV/CHV. We _must_ first set up the register 3728 * without actually enabling the port, and then do another 3729 * write to enable the port. Otherwise link training will 3730 * fail when the power sequencer is freshly used for this port. 
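 *
 * Concretely that means two writes of intel_dp->DP to output_reg:
 * intel_dp_program_link_training_pattern() above wrote it with
 * DP_PORT_EN still clear, and the write below repeats it with
 * DP_PORT_EN set.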
3731 */ 3732 intel_dp->DP |= DP_PORT_EN; 3733 if (old_crtc_state->has_audio) 3734 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3735 3736 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3737 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3738 } 3739 3740 static void intel_enable_dp(struct intel_atomic_state *state, 3741 struct intel_encoder *encoder, 3742 const struct intel_crtc_state *pipe_config, 3743 const struct drm_connector_state *conn_state) 3744 { 3745 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3746 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3747 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3748 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3749 enum pipe pipe = crtc->pipe; 3750 intel_wakeref_t wakeref; 3751 3752 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3753 return; 3754 3755 with_pps_lock(intel_dp, wakeref) { 3756 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3757 vlv_init_panel_power_sequencer(encoder, pipe_config); 3758 3759 intel_dp_enable_port(intel_dp, pipe_config); 3760 3761 edp_panel_vdd_on(intel_dp); 3762 edp_panel_on(intel_dp); 3763 edp_panel_vdd_off(intel_dp, true); 3764 } 3765 3766 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3767 unsigned int lane_mask = 0x0; 3768 3769 if (IS_CHERRYVIEW(dev_priv)) 3770 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3771 3772 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3773 lane_mask); 3774 } 3775 3776 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3777 intel_dp_start_link_train(intel_dp); 3778 intel_dp_stop_link_train(intel_dp); 3779 3780 if (pipe_config->has_audio) { 3781 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3782 pipe_name(pipe)); 3783 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3784 } 3785 } 3786 3787 static void g4x_enable_dp(struct intel_atomic_state *state, 3788 struct intel_encoder *encoder, 3789 const struct intel_crtc_state *pipe_config, 3790 const struct drm_connector_state *conn_state) 3791 { 3792 intel_enable_dp(state, encoder, pipe_config, conn_state); 3793 intel_edp_backlight_on(pipe_config, conn_state); 3794 } 3795 3796 static void vlv_enable_dp(struct intel_atomic_state *state, 3797 struct intel_encoder *encoder, 3798 const struct intel_crtc_state *pipe_config, 3799 const struct drm_connector_state *conn_state) 3800 { 3801 intel_edp_backlight_on(pipe_config, conn_state); 3802 } 3803 3804 static void g4x_pre_enable_dp(struct intel_atomic_state *state, 3805 struct intel_encoder *encoder, 3806 const struct intel_crtc_state *pipe_config, 3807 const struct drm_connector_state *conn_state) 3808 { 3809 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3810 enum port port = encoder->port; 3811 3812 intel_dp_prepare(encoder, pipe_config); 3813 3814 /* Only ilk+ has port A */ 3815 if (port == PORT_A) 3816 ilk_edp_pll_on(intel_dp, pipe_config); 3817 } 3818 3819 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3820 { 3821 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3822 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 3823 enum pipe pipe = intel_dp->pps_pipe; 3824 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3825 3826 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3827 3828 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 3829 return; 3830 3831 edp_panel_vdd_off_sync(intel_dp); 3832 3833 /* 3834 * VLV seems to get confused when multiple power 
sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), intel_dig_port->base.base.base.id,
		    intel_dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously, make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
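	 * vlv_steal_power_sequencer() below walks every DP encoder and
	 * detaches this pipe's sequencer from whichever port held it,
	 * making sure vdd is off before we take it over.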
3904 */ 3905 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3906 3907 intel_dp->active_pipe = crtc->pipe; 3908 3909 if (!intel_dp_is_edp(intel_dp)) 3910 return; 3911 3912 /* now it's all ours */ 3913 intel_dp->pps_pipe = crtc->pipe; 3914 3915 drm_dbg_kms(&dev_priv->drm, 3916 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3917 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3918 encoder->base.name); 3919 3920 /* init power sequencer on this pipe and port */ 3921 intel_dp_init_panel_power_sequencer(intel_dp); 3922 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3923 } 3924 3925 static void vlv_pre_enable_dp(struct intel_atomic_state *state, 3926 struct intel_encoder *encoder, 3927 const struct intel_crtc_state *pipe_config, 3928 const struct drm_connector_state *conn_state) 3929 { 3930 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3931 3932 intel_enable_dp(state, encoder, pipe_config, conn_state); 3933 } 3934 3935 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, 3936 struct intel_encoder *encoder, 3937 const struct intel_crtc_state *pipe_config, 3938 const struct drm_connector_state *conn_state) 3939 { 3940 intel_dp_prepare(encoder, pipe_config); 3941 3942 vlv_phy_pre_pll_enable(encoder, pipe_config); 3943 } 3944 3945 static void chv_pre_enable_dp(struct intel_atomic_state *state, 3946 struct intel_encoder *encoder, 3947 const struct intel_crtc_state *pipe_config, 3948 const struct drm_connector_state *conn_state) 3949 { 3950 chv_phy_pre_encoder_enable(encoder, pipe_config); 3951 3952 intel_enable_dp(state, encoder, pipe_config, conn_state); 3953 3954 /* Second common lane will stay alive on its own now */ 3955 chv_phy_release_cl2_override(encoder); 3956 } 3957 3958 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, 3959 struct intel_encoder *encoder, 3960 const struct intel_crtc_state *pipe_config, 3961 const struct drm_connector_state *conn_state) 3962 { 3963 intel_dp_prepare(encoder, pipe_config); 3964 3965 chv_phy_pre_pll_enable(encoder, pipe_config); 3966 } 3967 3968 static void chv_dp_post_pll_disable(struct intel_atomic_state *state, 3969 struct intel_encoder *encoder, 3970 const struct intel_crtc_state *old_crtc_state, 3971 const struct drm_connector_state *old_conn_state) 3972 { 3973 chv_phy_post_pll_disable(encoder, old_crtc_state); 3974 } 3975 3976 /* 3977 * Fetch AUX CH registers 0x202 - 0x207 which contain 3978 * link status information 3979 */ 3980 bool 3981 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3982 { 3983 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3984 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 3985 } 3986 3987 /* These are source-specific values. 
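 * For example, the vlv/chv PHYs can drive voltage swing level 3,
 * while the pre-DDI CPU eDP ports top out at level 2 (see
 * intel_dp_voltage_max() below).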
*/ 3988 u8 3989 intel_dp_voltage_max(struct intel_dp *intel_dp) 3990 { 3991 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3992 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3993 enum port port = encoder->port; 3994 3995 if (HAS_DDI(dev_priv)) 3996 return intel_ddi_dp_voltage_max(encoder); 3997 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3998 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3999 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 4000 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4001 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 4002 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 4003 else 4004 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4005 } 4006 4007 u8 4008 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) 4009 { 4010 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4011 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4012 enum port port = encoder->port; 4013 4014 if (HAS_DDI(dev_priv)) { 4015 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); 4016 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4017 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 4018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4019 return DP_TRAIN_PRE_EMPH_LEVEL_3; 4020 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4021 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4023 return DP_TRAIN_PRE_EMPH_LEVEL_1; 4024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4025 default: 4026 return DP_TRAIN_PRE_EMPH_LEVEL_0; 4027 } 4028 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 4029 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 4030 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4031 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4034 return DP_TRAIN_PRE_EMPH_LEVEL_1; 4035 default: 4036 return DP_TRAIN_PRE_EMPH_LEVEL_0; 4037 } 4038 } else { 4039 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 4040 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4041 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4043 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4045 return DP_TRAIN_PRE_EMPH_LEVEL_1; 4046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4047 default: 4048 return DP_TRAIN_PRE_EMPH_LEVEL_0; 4049 } 4050 } 4051 } 4052 4053 static void vlv_set_signal_levels(struct intel_dp *intel_dp) 4054 { 4055 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4056 unsigned long demph_reg_value, preemph_reg_value, 4057 uniqtranscale_reg_value; 4058 u8 train_set = intel_dp->train_set[0]; 4059 4060 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4061 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4062 preemph_reg_value = 0x0004000; 4063 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4065 demph_reg_value = 0x2B405555; 4066 uniqtranscale_reg_value = 0x552AB83A; 4067 break; 4068 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4069 demph_reg_value = 0x2B404040; 4070 uniqtranscale_reg_value = 0x5548B83A; 4071 break; 4072 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4073 demph_reg_value = 0x2B245555; 4074 uniqtranscale_reg_value = 0x5560B83A; 4075 break; 4076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4077 demph_reg_value = 0x2B405555; 4078 uniqtranscale_reg_value = 0x5598DA3A; 4079 break; 4080 default: 4081 return; 4082 } 4083 break; 4084 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4085 preemph_reg_value = 0x0002000; 4086 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4087 case 
DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4088 demph_reg_value = 0x2B404040; 4089 uniqtranscale_reg_value = 0x5552B83A; 4090 break; 4091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4092 demph_reg_value = 0x2B404848; 4093 uniqtranscale_reg_value = 0x5580B83A; 4094 break; 4095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4096 demph_reg_value = 0x2B404040; 4097 uniqtranscale_reg_value = 0x55ADDA3A; 4098 break; 4099 default: 4100 return; 4101 } 4102 break; 4103 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4104 preemph_reg_value = 0x0000000; 4105 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4107 demph_reg_value = 0x2B305555; 4108 uniqtranscale_reg_value = 0x5570B83A; 4109 break; 4110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4111 demph_reg_value = 0x2B2B4040; 4112 uniqtranscale_reg_value = 0x55ADDA3A; 4113 break; 4114 default: 4115 return; 4116 } 4117 break; 4118 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4119 preemph_reg_value = 0x0006000; 4120 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4122 demph_reg_value = 0x1B405555; 4123 uniqtranscale_reg_value = 0x55ADDA3A; 4124 break; 4125 default: 4126 return; 4127 } 4128 break; 4129 default: 4130 return; 4131 } 4132 4133 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 4134 uniqtranscale_reg_value, 0); 4135 } 4136 4137 static void chv_set_signal_levels(struct intel_dp *intel_dp) 4138 { 4139 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4140 u32 deemph_reg_value, margin_reg_value; 4141 bool uniq_trans_scale = false; 4142 u8 train_set = intel_dp->train_set[0]; 4143 4144 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4145 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4146 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4148 deemph_reg_value = 128; 4149 margin_reg_value = 52; 4150 break; 4151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4152 deemph_reg_value = 128; 4153 margin_reg_value = 77; 4154 break; 4155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4156 deemph_reg_value = 128; 4157 margin_reg_value = 102; 4158 break; 4159 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4160 deemph_reg_value = 128; 4161 margin_reg_value = 154; 4162 uniq_trans_scale = true; 4163 break; 4164 default: 4165 return; 4166 } 4167 break; 4168 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4169 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4171 deemph_reg_value = 85; 4172 margin_reg_value = 78; 4173 break; 4174 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4175 deemph_reg_value = 85; 4176 margin_reg_value = 116; 4177 break; 4178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4179 deemph_reg_value = 85; 4180 margin_reg_value = 154; 4181 break; 4182 default: 4183 return; 4184 } 4185 break; 4186 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4187 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4188 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4189 deemph_reg_value = 64; 4190 margin_reg_value = 104; 4191 break; 4192 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4193 deemph_reg_value = 64; 4194 margin_reg_value = 154; 4195 break; 4196 default: 4197 return; 4198 } 4199 break; 4200 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4201 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4203 deemph_reg_value = 43; 4204 margin_reg_value = 154; 4205 break; 4206 default: 4207 return; 4208 } 4209 break; 4210 default: 4211 return; 4212 } 4213 4214 chv_set_phy_signal_level(encoder, deemph_reg_value, 4215 margin_reg_value, uniq_trans_scale); 4216 } 4217 4218 static u32 g4x_signal_levels(u8 
train_set) 4219 { 4220 u32 signal_levels = 0; 4221 4222 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4223 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4224 default: 4225 signal_levels |= DP_VOLTAGE_0_4; 4226 break; 4227 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4228 signal_levels |= DP_VOLTAGE_0_6; 4229 break; 4230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4231 signal_levels |= DP_VOLTAGE_0_8; 4232 break; 4233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4234 signal_levels |= DP_VOLTAGE_1_2; 4235 break; 4236 } 4237 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4238 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4239 default: 4240 signal_levels |= DP_PRE_EMPHASIS_0; 4241 break; 4242 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4243 signal_levels |= DP_PRE_EMPHASIS_3_5; 4244 break; 4245 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4246 signal_levels |= DP_PRE_EMPHASIS_6; 4247 break; 4248 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4249 signal_levels |= DP_PRE_EMPHASIS_9_5; 4250 break; 4251 } 4252 return signal_levels; 4253 } 4254 4255 static void 4256 g4x_set_signal_levels(struct intel_dp *intel_dp) 4257 { 4258 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4259 u8 train_set = intel_dp->train_set[0]; 4260 u32 signal_levels; 4261 4262 signal_levels = g4x_signal_levels(train_set); 4263 4264 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4265 signal_levels); 4266 4267 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); 4268 intel_dp->DP |= signal_levels; 4269 4270 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4271 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4272 } 4273 4274 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4275 static u32 snb_cpu_edp_signal_levels(u8 train_set) 4276 { 4277 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4278 DP_TRAIN_PRE_EMPHASIS_MASK); 4279 4280 switch (signal_levels) { 4281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4283 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4285 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4288 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4291 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4294 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4295 default: 4296 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4297 "0x%x\n", signal_levels); 4298 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4299 } 4300 } 4301 4302 static void 4303 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4304 { 4305 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4306 u8 train_set = intel_dp->train_set[0]; 4307 u32 signal_levels; 4308 4309 signal_levels = snb_cpu_edp_signal_levels(train_set); 4310 4311 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4312 signal_levels); 4313 4314 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4315 intel_dp->DP |= signal_levels; 4316 4317 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4318 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4319 } 4320 4321 /* IVB CPU eDP voltage swing and pre-emphasis 
control */ 4322 static u32 ivb_cpu_edp_signal_levels(u8 train_set) 4323 { 4324 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4325 DP_TRAIN_PRE_EMPHASIS_MASK); 4326 4327 switch (signal_levels) { 4328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4329 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4331 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4333 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4334 4335 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4336 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4338 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4339 4340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4341 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4343 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4344 4345 default: 4346 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4347 "0x%x\n", signal_levels); 4348 return EDP_LINK_TRAIN_500MV_0DB_IVB; 4349 } 4350 } 4351 4352 static void 4353 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4354 { 4355 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4356 u8 train_set = intel_dp->train_set[0]; 4357 u32 signal_levels; 4358 4359 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4360 4361 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4362 signal_levels); 4363 4364 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4365 intel_dp->DP |= signal_levels; 4366 4367 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4368 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4369 } 4370 4371 void intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4372 { 4373 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4374 u8 train_set = intel_dp->train_set[0]; 4375 4376 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4377 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4378 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4379 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4380 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4381 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4382 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
4383 " (max)" : ""); 4384 4385 intel_dp->set_signal_levels(intel_dp); 4386 } 4387 4388 void 4389 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4390 u8 dp_train_pat) 4391 { 4392 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4393 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 4394 4395 if (dp_train_pat & train_pat_mask) 4396 drm_dbg_kms(&dev_priv->drm, 4397 "Using DP training pattern TPS%d\n", 4398 dp_train_pat & train_pat_mask); 4399 4400 intel_dp->set_link_train(intel_dp, dp_train_pat); 4401 } 4402 4403 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4404 { 4405 if (intel_dp->set_idle_link_train) 4406 intel_dp->set_idle_link_train(intel_dp); 4407 } 4408 4409 static void 4410 intel_dp_link_down(struct intel_encoder *encoder, 4411 const struct intel_crtc_state *old_crtc_state) 4412 { 4413 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4414 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4415 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4416 enum port port = encoder->port; 4417 u32 DP = intel_dp->DP; 4418 4419 if (drm_WARN_ON(&dev_priv->drm, 4420 (intel_de_read(dev_priv, intel_dp->output_reg) & 4421 DP_PORT_EN) == 0)) 4422 return; 4423 4424 drm_dbg_kms(&dev_priv->drm, "\n"); 4425 4426 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4427 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4428 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4429 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4430 } else { 4431 DP &= ~DP_LINK_TRAIN_MASK; 4432 DP |= DP_LINK_TRAIN_PAT_IDLE; 4433 } 4434 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4435 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4436 4437 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4438 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4439 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4440 4441 /* 4442 * HW workaround for IBX, we need to move the port 4443 * to transcoder A after disabling it to allow the 4444 * matching HDMI port to be enabled on transcoder A. 4445 */ 4446 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4447 /* 4448 * We get CPU/PCH FIFO underruns on the other pipe when 4449 * doing the workaround. Sweep them under the rug. 
 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}

static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 dpcd_ext[6];

	/*
	 * Prior to DP 1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
	 * the true capability of the panel. The only way to check is to
	 * then compare 0000h and 2200h.
	 */
	if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
			     &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
		drm_err(&i915->drm,
			"Failed to read DPCD extended capabilities\n");
		return;
	}

	if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		drm_dbg_kms(&i915->drm,
			    "DPCD extended DPCD rev less than base DPCD rev\n");
		return;
	}

	if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return;

	drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
		    (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);

	memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}

bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	intel_dp_extended_receiver_capabilities(intel_dp);

	drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
		    intel_dp->dpcd);

	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}

bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
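	 * (The caps are re-read on detect, so a sink swap on the same
	 * connector must not inherit the previous sink's DSC caps.)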
4555 */ 4556 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4557 4558 /* Clear fec_capable to avoid using stale values */ 4559 intel_dp->fec_capable = 0; 4560 4561 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4562 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4563 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4564 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4565 intel_dp->dsc_dpcd, 4566 sizeof(intel_dp->dsc_dpcd)) < 0) 4567 drm_err(&i915->drm, 4568 "Failed to read DPCD register 0x%x\n", 4569 DP_DSC_SUPPORT); 4570 4571 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 4572 (int)sizeof(intel_dp->dsc_dpcd), 4573 intel_dp->dsc_dpcd); 4574 4575 /* FEC is supported only on DP 1.4 */ 4576 if (!intel_dp_is_edp(intel_dp) && 4577 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4578 &intel_dp->fec_capable) < 0) 4579 drm_err(&i915->drm, 4580 "Failed to read FEC DPCD register\n"); 4581 4582 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 4583 intel_dp->fec_capable); 4584 } 4585 } 4586 4587 static bool 4588 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4589 { 4590 struct drm_i915_private *dev_priv = 4591 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4592 4593 /* this function is meant to be called only once */ 4594 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4595 4596 if (!intel_dp_read_dpcd(intel_dp)) 4597 return false; 4598 4599 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4600 drm_dp_is_branch(intel_dp->dpcd)); 4601 4602 /* 4603 * Read the eDP display control registers. 4604 * 4605 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4606 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4607 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4608 * method). The display control registers should read zero if they're 4609 * not supported anyway. 4610 */ 4611 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4612 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4613 sizeof(intel_dp->edp_dpcd)) 4614 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 4615 (int)sizeof(intel_dp->edp_dpcd), 4616 intel_dp->edp_dpcd); 4617 4618 /* 4619 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4620 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4621 */ 4622 intel_psr_init_dpcd(intel_dp); 4623 4624 /* Read the eDP 1.4+ supported link rates. */ 4625 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4626 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4627 int i; 4628 4629 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4630 sink_rates, sizeof(sink_rates)); 4631 4632 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4633 int val = le16_to_cpu(sink_rates[i]); 4634 4635 if (val == 0) 4636 break; 4637 4638 /* Value read multiplied by 200kHz gives the per-lane 4639 * link rate in kHz. The source rates are, however, 4640 * stored in terms of LS_Clk kHz. The full conversion 4641 * back to symbols is 4642 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4643 */ 4644 intel_dp->sink_rates[i] = (val * 200) / 10; 4645 } 4646 intel_dp->num_sink_rates = i; 4647 } 4648 4649 /* 4650 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4651 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
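	 *
	 * Worked example for the conversion above: a raw value of 13500
	 * in DP_SUPPORTED_LINK_RATES means 13500 * 200 kHz = 2.7 GHz per
	 * lane, and is stored in sink_rates[] as 13500 * 200 / 10 = 270000.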
 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for the sink count,
	 * which is why we don't bother reading it here or in
	 * intel_edp_init_dpcd().
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !drm_dp_has_quirk(&intel_dp->desc, 0,
			      DP_DPCD_QUIRK_NO_SINK_COUNT)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse interrupts,
		 * hence a member variable in intel_dp tracks any changes
		 * between them.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we need to know
		 * whether a dongle is present, we don't need to update the
		 * downstream port information. So an early return here saves
		 * the time of performing other operations that are not required.
 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}

static bool
intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
	u8 mstm_cap;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return false;

	return mstm_cap & DP_MST_CAP;
}

static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	return i915_modparams.enable_dp_mst &&
		intel_dp->can_mst &&
		intel_dp_sink_can_mst(intel_dp);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915_modparams.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915_modparams.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCBCR 420 or HDR BT.2020 signals we should use a DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
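	 * A revision 0x5 packet is identified by HB2 = 0x5, HB3 = 0x13;
	 * the matching check lives in intel_dp_vsc_sdp_unpack() below.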
 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1),
	 * i.e. infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of the DP infoframe sdp packet for HDR static metadata
	 * consists of
	 * - DP SDP Header (struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11 the GMP register size is identical to the DP HDR
	 * static metadata infoframe size. GEN11+ has a larger register;
	 * write_infoframe() will pad the rest.
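	 *
	 * I.e. the length returned below works out to 4 + 2 + 26 = 32 bytes.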
 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
					&sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg);

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (intel_psr_enabled(intel_dp))
		val &= ~dip_enable;
	else
		val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);

	if (!enable) {
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		return;
	}

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!intel_psr_enabled(intel_dp))
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	/* clear only the destination struct; 'size' is the source buffer size */
	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 &&
sdp->sdp_header.HB3 == 0x8) || 5058 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 5059 /* 5060 * - HB2 = 0x2, HB3 = 0x8 5061 * VSC SDP supporting 3D stereo + PSR 5062 * - HB2 = 0x4, HB3 = 0xe 5063 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 5064 * first scan line of the SU region (applies to eDP v1.4b 5065 * and higher). 5066 */ 5067 return 0; 5068 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 5069 /* 5070 * - HB2 = 0x5, HB3 = 0x13 5071 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 5072 * Format. 5073 */ 5074 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 5075 vsc->colorimetry = sdp->db[16] & 0xf; 5076 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 5077 5078 switch (sdp->db[17] & 0x7) { 5079 case 0x0: 5080 vsc->bpc = 6; 5081 break; 5082 case 0x1: 5083 vsc->bpc = 8; 5084 break; 5085 case 0x2: 5086 vsc->bpc = 10; 5087 break; 5088 case 0x3: 5089 vsc->bpc = 12; 5090 break; 5091 case 0x4: 5092 vsc->bpc = 16; 5093 break; 5094 default: 5095 MISSING_CASE(sdp->db[17] & 0x7); 5096 return -EINVAL; 5097 } 5098 5099 vsc->content_type = sdp->db[18] & 0x7; 5100 } else { 5101 return -EINVAL; 5102 } 5103 5104 return 0; 5105 } 5106 5107 static int 5108 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 5109 const void *buffer, size_t size) 5110 { 5111 int ret; 5112 5113 const struct dp_sdp *sdp = buffer; 5114 5115 if (size < sizeof(struct dp_sdp)) 5116 return -EINVAL; 5117 5118 if (sdp->sdp_header.HB0 != 0) 5119 return -EINVAL; 5120 5121 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 5122 return -EINVAL; 5123 5124 /* 5125 * Least Significant Eight Bits of (Data Byte Count – 1) 5126 * 1Dh (i.e., Data Byte Count = 30 bytes). 5127 */ 5128 if (sdp->sdp_header.HB2 != 0x1D) 5129 return -EINVAL; 5130 5131 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
 */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				       sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;

	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
if (status <= 0) { 5246 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5247 return DP_TEST_NAK; 5248 } 5249 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5250 5251 /* Validate the requested link rate and lane count */ 5252 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5253 test_lane_count)) 5254 return DP_TEST_NAK; 5255 5256 intel_dp->compliance.test_lane_count = test_lane_count; 5257 intel_dp->compliance.test_link_rate = test_link_rate; 5258 5259 return DP_TEST_ACK; 5260 } 5261 5262 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5263 { 5264 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5265 u8 test_pattern; 5266 u8 test_misc; 5267 __be16 h_width, v_height; 5268 int status = 0; 5269 5270 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5271 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5272 &test_pattern); 5273 if (status <= 0) { 5274 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5275 return DP_TEST_NAK; 5276 } 5277 if (test_pattern != DP_COLOR_RAMP) 5278 return DP_TEST_NAK; 5279 5280 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5281 &h_width, 2); 5282 if (status <= 0) { 5283 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5284 return DP_TEST_NAK; 5285 } 5286 5287 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5288 &v_height, 2); 5289 if (status <= 0) { 5290 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5291 return DP_TEST_NAK; 5292 } 5293 5294 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5295 &test_misc); 5296 if (status <= 0) { 5297 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5298 return DP_TEST_NAK; 5299 } 5300 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5301 return DP_TEST_NAK; 5302 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5303 return DP_TEST_NAK; 5304 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5305 case DP_TEST_BIT_DEPTH_6: 5306 intel_dp->compliance.test_data.bpc = 6; 5307 break; 5308 case DP_TEST_BIT_DEPTH_8: 5309 intel_dp->compliance.test_data.bpc = 8; 5310 break; 5311 default: 5312 return DP_TEST_NAK; 5313 } 5314 5315 intel_dp->compliance.test_data.video_pattern = test_pattern; 5316 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5317 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5318 /* Set test active flag here so userspace doesn't interrupt things */ 5319 intel_dp->compliance.test_active = true; 5320 5321 return DP_TEST_ACK; 5322 } 5323 5324 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5325 { 5326 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5327 u8 test_result = DP_TEST_ACK; 5328 struct intel_connector *intel_connector = intel_dp->attached_connector; 5329 struct drm_connector *connector = &intel_connector->base; 5330 5331 if (intel_connector->detect_edid == NULL || 5332 connector->edid_corrupt || 5333 intel_dp->aux.i2c_defer_count > 6) { 5334 /* Check EDID read for NACKs, DEFERs and corruption 5335 * (DP CTS 1.2 Core r1.1) 5336 * 4.2.2.4 : Failed EDID read, I2C_NAK 5337 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5338 * 4.2.2.6 : EDID corruption detected 5339 * Use failsafe mode for all cases 5340 */ 5341 if (intel_dp->aux.i2c_nack_count > 0 || 5342 intel_dp->aux.i2c_defer_count > 0) 5343 drm_dbg_kms(&i915->drm, 5344 "EDID read had %d NACKs, %d DEFERs\n", 5345 intel_dp->aux.i2c_nack_count, 5346 intel_dp->aux.i2c_defer_count); 5347 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 5348 } else { 5349 struct edid *block = 
intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}

static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/*
	 * link_mst is set to false to avoid executing mst related code
	 * during compliance testing.
	 */
	intel_dp->link_mst = false;

	return DP_TEST_ACK;
}

static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x250. As
		 * the current firmware of the DPR-100 could not set it, it is
		 * hardcoded here for the compliance test.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x24A. As
		 * the current firmware of the DPR-100 could not set it, it is
		 * hardcoded here for the compliance test.
5444 */ 5445 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5446 pattern_val = 0xFB; 5447 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5448 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5449 pattern_val); 5450 break; 5451 default: 5452 WARN(1, "Invalid Phy Test Pattern\n"); 5453 } 5454 } 5455 5456 static void 5457 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) 5458 { 5459 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5460 struct drm_device *dev = intel_dig_port->base.base.dev; 5461 struct drm_i915_private *dev_priv = to_i915(dev); 5462 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5463 enum pipe pipe = crtc->pipe; 5464 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5465 5466 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5467 TRANS_DDI_FUNC_CTL(pipe)); 5468 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5469 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5470 5471 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5472 TGL_TRANS_DDI_PORT_MASK); 5473 trans_conf_value &= ~PIPECONF_ENABLE; 5474 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5475 5476 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5477 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5478 trans_ddi_func_ctl_value); 5479 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5480 } 5481 5482 static void 5483 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) 5484 { 5485 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5486 struct drm_device *dev = intel_dig_port->base.base.dev; 5487 struct drm_i915_private *dev_priv = to_i915(dev); 5488 enum port port = intel_dig_port->base.port; 5489 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5490 enum pipe pipe = crtc->pipe; 5491 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5492 5493 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5494 TRANS_DDI_FUNC_CTL(pipe)); 5495 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5496 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5497 5498 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5499 TGL_TRANS_DDI_SELECT_PORT(port); 5500 trans_conf_value |= PIPECONF_ENABLE; 5501 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5502 5503 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5504 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5505 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5506 trans_ddi_func_ctl_value); 5507 } 5508 5509 void intel_dp_process_phy_request(struct intel_dp *intel_dp) 5510 { 5511 struct drm_dp_phy_test_params *data = 5512 &intel_dp->compliance.test_data.phytest; 5513 u8 link_status[DP_LINK_STATUS_SIZE]; 5514 5515 if (!intel_dp_get_link_status(intel_dp, link_status)) { 5516 DRM_DEBUG_KMS("failed to get link status\n"); 5517 return; 5518 } 5519 5520 /* retrieve vswing & pre-emphasis setting */ 5521 intel_dp_get_adjust_train(intel_dp, link_status); 5522 5523 intel_dp_autotest_phy_ddi_disable(intel_dp); 5524 5525 intel_dp_set_signal_levels(intel_dp); 5526 5527 intel_dp_phy_pattern_update(intel_dp); 5528 5529 intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); 5530 5531 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5532 link_status[DP_DPCD_REV]); 5533 } 5534 5535 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5536 { 5537 u8 test_result; 5538 5539 test_result = intel_dp_prepare_phytest(intel_dp); 5540 if 
(test_result != DP_TEST_ACK) 5541 DRM_ERROR("Phy test preparation failed\n"); 5542 5543 intel_dp_process_phy_request(intel_dp); 5544 5545 return test_result; 5546 } 5547 5548 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5549 { 5550 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5551 u8 response = DP_TEST_NAK; 5552 u8 request = 0; 5553 int status; 5554 5555 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5556 if (status <= 0) { 5557 drm_dbg_kms(&i915->drm, 5558 "Could not read test request from sink\n"); 5559 goto update_status; 5560 } 5561 5562 switch (request) { 5563 case DP_TEST_LINK_TRAINING: 5564 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5565 response = intel_dp_autotest_link_training(intel_dp); 5566 break; 5567 case DP_TEST_LINK_VIDEO_PATTERN: 5568 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5569 response = intel_dp_autotest_video_pattern(intel_dp); 5570 break; 5571 case DP_TEST_LINK_EDID_READ: 5572 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5573 response = intel_dp_autotest_edid(intel_dp); 5574 break; 5575 case DP_TEST_LINK_PHY_TEST_PATTERN: 5576 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5577 response = intel_dp_autotest_phy_pattern(intel_dp); 5578 break; 5579 default: 5580 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5581 request); 5582 break; 5583 } 5584 5585 if (response & DP_TEST_ACK) 5586 intel_dp->compliance.test_type = request; 5587 5588 update_status: 5589 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5590 if (status <= 0) 5591 drm_dbg_kms(&i915->drm, 5592 "Could not write test response to sink\n"); 5593 } 5594 5595 static int 5596 intel_dp_check_mst_status(struct intel_dp *intel_dp) 5597 { 5598 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5599 bool need_retrain = false; 5600 5601 if (!intel_dp->is_mst) 5602 return -EINVAL; 5603 5604 WARN_ON_ONCE(intel_dp->active_mst_links < 0); 5605 5606 for (;;) { 5607 u8 esi[DP_DPRX_ESI_LEN] = {}; 5608 bool bret, handled; 5609 int retry; 5610 5611 bret = intel_dp_get_sink_irq_esi(intel_dp, esi); 5612 if (!bret) { 5613 drm_dbg_kms(&i915->drm, 5614 "failed to get ESI - device may have failed\n"); 5615 return -EINVAL; 5616 } 5617 5618 /* check link status - esi[10] = 0x200c */ 5619 if (intel_dp->active_mst_links > 0 && !need_retrain && 5620 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 5621 drm_dbg_kms(&i915->drm, 5622 "channel EQ not ok, retraining\n"); 5623 need_retrain = true; 5624 } 5625 5626 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 5627 5628 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 5629 if (!handled) 5630 break; 5631 5632 for (retry = 0; retry < 3; retry++) { 5633 int wret; 5634 5635 wret = drm_dp_dpcd_write(&intel_dp->aux, 5636 DP_SINK_COUNT_ESI+1, 5637 &esi[1], 3); 5638 if (wret == 3) 5639 break; 5640 } 5641 } 5642 5643 return need_retrain; 5644 } 5645 5646 static bool 5647 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5648 { 5649 u8 link_status[DP_LINK_STATUS_SIZE]; 5650 5651 if (!intel_dp->link_trained) 5652 return false; 5653 5654 /* 5655 * While the PSR source HW is enabled it controls the main link, 5656 * enabling and disabling the sending of frames, so trying to retrain 5657 * can fail: the link may not be on, or training patterns could get 5658 * mixed with frame data, causing the retrain to fail. 5659 * Also, when exiting PSR, the HW retrains the link anyway, fixing 5660 * any link status error.
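 * (PSR error conditions are instead picked up separately, via the
 * intel_psr_short_pulse() call in intel_dp_short_pulse() below.)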
5661 */ 5662 if (intel_psr_enabled(intel_dp)) 5663 return false; 5664 5665 if (!intel_dp_get_link_status(intel_dp, link_status)) 5666 return false; 5667 5668 /* 5669 * Validate the cached values of intel_dp->link_rate and 5670 * intel_dp->lane_count before attempting to retrain. 5671 */ 5672 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5673 intel_dp->lane_count)) 5674 return false; 5675 5676 /* Retrain if Channel EQ or CR not ok */ 5677 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5678 } 5679 5680 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5681 const struct drm_connector_state *conn_state) 5682 { 5683 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5684 struct intel_encoder *encoder; 5685 enum pipe pipe; 5686 5687 if (!conn_state->best_encoder) 5688 return false; 5689 5690 /* SST */ 5691 encoder = &dp_to_dig_port(intel_dp)->base; 5692 if (conn_state->best_encoder == &encoder->base) 5693 return true; 5694 5695 /* MST */ 5696 for_each_pipe(i915, pipe) { 5697 encoder = &intel_dp->mst_encoders[pipe]->base; 5698 if (conn_state->best_encoder == &encoder->base) 5699 return true; 5700 } 5701 5702 return false; 5703 } 5704 5705 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5706 struct drm_modeset_acquire_ctx *ctx, 5707 u32 *crtc_mask) 5708 { 5709 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5710 struct drm_connector_list_iter conn_iter; 5711 struct intel_connector *connector; 5712 int ret = 0; 5713 5714 *crtc_mask = 0; 5715 5716 if (!intel_dp_needs_link_retrain(intel_dp)) 5717 return 0; 5718 5719 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5720 for_each_intel_connector_iter(connector, &conn_iter) { 5721 struct drm_connector_state *conn_state = 5722 connector->base.state; 5723 struct intel_crtc_state *crtc_state; 5724 struct intel_crtc *crtc; 5725 5726 if (!intel_dp_has_connector(intel_dp, conn_state)) 5727 continue; 5728 5729 crtc = to_intel_crtc(conn_state->crtc); 5730 if (!crtc) 5731 continue; 5732 5733 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5734 if (ret) 5735 break; 5736 5737 crtc_state = to_intel_crtc_state(crtc->base.state); 5738 5739 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5740 5741 if (!crtc_state->hw.active) 5742 continue; 5743 5744 if (conn_state->commit && 5745 !try_wait_for_completion(&conn_state->commit->hw_done)) 5746 continue; 5747 5748 *crtc_mask |= drm_crtc_mask(&crtc->base); 5749 } 5750 drm_connector_list_iter_end(&conn_iter); 5751 5752 if (!intel_dp_needs_link_retrain(intel_dp)) 5753 *crtc_mask = 0; 5754 5755 return ret; 5756 } 5757 5758 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5759 { 5760 struct intel_connector *connector = intel_dp->attached_connector; 5761 5762 return connector->base.status == connector_status_connected || 5763 intel_dp->is_mst; 5764 } 5765 5766 int intel_dp_retrain_link(struct intel_encoder *encoder, 5767 struct drm_modeset_acquire_ctx *ctx) 5768 { 5769 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5770 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5771 struct intel_crtc *crtc; 5772 u32 crtc_mask; 5773 int ret; 5774 5775 if (!intel_dp_is_connected(intel_dp)) 5776 return 0; 5777 5778 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5779 ctx); 5780 if (ret) 5781 return ret; 5782 5783 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5784 if (ret) 5785 return ret; 5786 5787 if (crtc_mask == 0) 5788 return 0; 5789 5790 drm_dbg_kms(&dev_priv->drm, 
"[ENCODER:%d:%s] retraining link\n", 5791 encoder->base.base.id, encoder->base.name); 5792 5793 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5794 const struct intel_crtc_state *crtc_state = 5795 to_intel_crtc_state(crtc->base.state); 5796 5797 /* Suppress underruns caused by re-training */ 5798 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5799 if (crtc_state->has_pch_encoder) 5800 intel_set_pch_fifo_underrun_reporting(dev_priv, 5801 intel_crtc_pch_transcoder(crtc), false); 5802 } 5803 5804 intel_dp_start_link_train(intel_dp); 5805 intel_dp_stop_link_train(intel_dp); 5806 5807 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5808 const struct intel_crtc_state *crtc_state = 5809 to_intel_crtc_state(crtc->base.state); 5810 5811 /* Keep underrun reporting disabled until things are stable */ 5812 intel_wait_for_vblank(dev_priv, crtc->pipe); 5813 5814 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5815 if (crtc_state->has_pch_encoder) 5816 intel_set_pch_fifo_underrun_reporting(dev_priv, 5817 intel_crtc_pch_transcoder(crtc), true); 5818 } 5819 5820 return 0; 5821 } 5822 5823 /* 5824 * If display is now connected check links status, 5825 * there has been known issues of link loss triggering 5826 * long pulse. 5827 * 5828 * Some sinks (eg. ASUS PB287Q) seem to perform some 5829 * weird HPD ping pong during modesets. So we can apparently 5830 * end up with HPD going low during a modeset, and then 5831 * going back up soon after. And once that happens we must 5832 * retrain the link to get a picture. That's in case no 5833 * userspace component reacted to intermittent HPD dip. 5834 */ 5835 static enum intel_hotplug_state 5836 intel_dp_hotplug(struct intel_encoder *encoder, 5837 struct intel_connector *connector) 5838 { 5839 struct drm_modeset_acquire_ctx ctx; 5840 enum intel_hotplug_state state; 5841 int ret; 5842 5843 state = intel_encoder_hotplug(encoder, connector); 5844 5845 drm_modeset_acquire_init(&ctx, 0); 5846 5847 for (;;) { 5848 ret = intel_dp_retrain_link(encoder, &ctx); 5849 5850 if (ret == -EDEADLK) { 5851 drm_modeset_backoff(&ctx); 5852 continue; 5853 } 5854 5855 break; 5856 } 5857 5858 drm_modeset_drop_locks(&ctx); 5859 drm_modeset_acquire_fini(&ctx); 5860 drm_WARN(encoder->base.dev, ret, 5861 "Acquiring modeset locks failed with %i\n", ret); 5862 5863 /* 5864 * Keeping it consistent with intel_ddi_hotplug() and 5865 * intel_hdmi_hotplug(). 5866 */ 5867 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) 5868 state = INTEL_HOTPLUG_RETRY; 5869 5870 return state; 5871 } 5872 5873 static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 5874 { 5875 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5876 u8 val; 5877 5878 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5879 return; 5880 5881 if (drm_dp_dpcd_readb(&intel_dp->aux, 5882 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5883 return; 5884 5885 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5886 5887 if (val & DP_AUTOMATED_TEST_REQUEST) 5888 intel_dp_handle_test_request(intel_dp); 5889 5890 if (val & DP_CP_IRQ) 5891 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5892 5893 if (val & DP_SINK_SPECIFIC_IRQ) 5894 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 5895 } 5896 5897 /* 5898 * According to DP spec 5899 * 5.1.2: 5900 * 1. Read DPCD 5901 * 2. Configure link according to Receiver Capabilities 5902 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5903 * 4. 
Check link status on receipt of hot-plug interrupt 5904 5905 * intel_dp_short_pulse - handles short pulse interrupts 5906 * when full detection is not required. 5907 * Returns %true if the short pulse was handled and full detection 5908 * is NOT required, %false otherwise. 5909 */ 5910 static bool 5911 intel_dp_short_pulse(struct intel_dp *intel_dp) 5912 { 5913 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5914 u8 old_sink_count = intel_dp->sink_count; 5915 bool ret; 5916 5917 /* 5918 * Clearing compliance test variables to allow capturing 5919 * of values for the next automated test request. 5920 */ 5921 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5922 5923 /* 5924 * Now read the DPCD to see if it's actually running. 5925 * If the current value of the sink count doesn't match 5926 * the value that was stored earlier, or the DPCD read failed, 5927 * we need to do full detection. 5928 */ 5929 ret = intel_dp_get_dpcd(intel_dp); 5930 5931 if ((old_sink_count != intel_dp->sink_count) || !ret) { 5932 /* No need to proceed if we are going to do full detect */ 5933 return false; 5934 } 5935 5936 intel_dp_check_service_irq(intel_dp); 5937 5938 /* Handle CEC interrupts, if any */ 5939 drm_dp_cec_irq(&intel_dp->aux); 5940 5941 /* defer to the hotplug work for link retraining if needed */ 5942 if (intel_dp_needs_link_retrain(intel_dp)) 5943 return false; 5944 5945 intel_psr_short_pulse(intel_dp); 5946 5947 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 5948 drm_dbg_kms(&dev_priv->drm, 5949 "Link Training Compliance Test requested\n"); 5950 /* Send a Hotplug Uevent to userspace to start modeset */ 5951 drm_kms_helper_hotplug_event(&dev_priv->drm); 5952 } 5953 5954 return true; 5955 } 5956 5957 /* XXX this is probably wrong for multiple downstream ports */ 5958 static enum drm_connector_status 5959 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 5960 { 5961 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5962 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 5963 u8 *dpcd = intel_dp->dpcd; 5964 u8 type; 5965 5966 if (WARN_ON(intel_dp_is_edp(intel_dp))) 5967 return connector_status_connected; 5968 5969 if (lspcon->active) 5970 lspcon_resume(lspcon); 5971 5972 if (!intel_dp_get_dpcd(intel_dp)) 5973 return connector_status_disconnected; 5974 5975 /* if there's no downstream port, we're done */ 5976 if (!drm_dp_is_branch(dpcd)) 5977 return connector_status_connected; 5978 5979 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 5980 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 5981 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 5982 5983 return intel_dp->sink_count ?
5984 connector_status_connected : connector_status_disconnected; 5985 } 5986 5987 if (intel_dp_can_mst(intel_dp)) 5988 return connector_status_connected; 5989 5990 /* If no HPD, poke DDC gently */ 5991 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5992 return connector_status_connected; 5993 5994 /* Well we tried, say unknown for unreliable port types */ 5995 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5996 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5997 if (type == DP_DS_PORT_TYPE_VGA || 5998 type == DP_DS_PORT_TYPE_NON_EDID) 5999 return connector_status_unknown; 6000 } else { 6001 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 6002 DP_DWN_STRM_PORT_TYPE_MASK; 6003 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 6004 type == DP_DWN_STRM_PORT_TYPE_OTHER) 6005 return connector_status_unknown; 6006 } 6007 6008 /* Anything else is out of spec, warn and ignore */ 6009 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 6010 return connector_status_disconnected; 6011 } 6012 6013 static enum drm_connector_status 6014 edp_detect(struct intel_dp *intel_dp) 6015 { 6016 return connector_status_connected; 6017 } 6018 6019 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 6020 { 6021 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6022 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; 6023 6024 return intel_de_read(dev_priv, SDEISR) & bit; 6025 } 6026 6027 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 6028 { 6029 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6030 u32 bit; 6031 6032 switch (encoder->hpd_pin) { 6033 case HPD_PORT_B: 6034 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 6035 break; 6036 case HPD_PORT_C: 6037 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 6038 break; 6039 case HPD_PORT_D: 6040 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 6041 break; 6042 default: 6043 MISSING_CASE(encoder->hpd_pin); 6044 return false; 6045 } 6046 6047 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6048 } 6049 6050 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 6051 { 6052 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6053 u32 bit; 6054 6055 switch (encoder->hpd_pin) { 6056 case HPD_PORT_B: 6057 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 6058 break; 6059 case HPD_PORT_C: 6060 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 6061 break; 6062 case HPD_PORT_D: 6063 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 6064 break; 6065 default: 6066 MISSING_CASE(encoder->hpd_pin); 6067 return false; 6068 } 6069 6070 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6071 } 6072 6073 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 6074 { 6075 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6076 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; 6077 6078 return intel_de_read(dev_priv, DEISR) & bit; 6079 } 6080 6081 /* 6082 * intel_digital_port_connected - is the specified port connected? 6083 * @encoder: intel_encoder 6084 * 6085 * In cases where there's a connector physically connected but it can't be used 6086 * by our hardware we also return false, since the rest of the driver should 6087 * pretty much treat the port as disconnected. This is relevant for type-C 6088 * (starting on ICL) where there's ownership involved. 6089 * 6090 * Return %true if port is connected, %false otherwise. 
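 *
 * A minimal caller sketch (illustrative only; it mirrors how
 * intel_dp_detect() below consumes this helper):
 *
 *	if (intel_digital_port_connected(encoder))
 *		status = intel_dp_detect_dpcd(intel_dp);
 *	else
 *		status = connector_status_disconnected;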
6091 */ 6092 bool intel_digital_port_connected(struct intel_encoder *encoder) 6093 { 6094 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6095 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6096 bool is_connected = false; 6097 intel_wakeref_t wakeref; 6098 6099 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6100 is_connected = dig_port->connected(encoder); 6101 6102 return is_connected; 6103 } 6104 6105 static struct edid * 6106 intel_dp_get_edid(struct intel_dp *intel_dp) 6107 { 6108 struct intel_connector *intel_connector = intel_dp->attached_connector; 6109 6110 /* use cached edid if we have one */ 6111 if (intel_connector->edid) { 6112 /* invalid edid */ 6113 if (IS_ERR(intel_connector->edid)) 6114 return NULL; 6115 6116 return drm_edid_duplicate(intel_connector->edid); 6117 } else 6118 return drm_get_edid(&intel_connector->base, 6119 &intel_dp->aux.ddc); 6120 } 6121 6122 static void 6123 intel_dp_set_edid(struct intel_dp *intel_dp) 6124 { 6125 struct intel_connector *intel_connector = intel_dp->attached_connector; 6126 struct edid *edid; 6127 6128 intel_dp_unset_edid(intel_dp); 6129 edid = intel_dp_get_edid(intel_dp); 6130 intel_connector->detect_edid = edid; 6131 6132 intel_dp->has_audio = drm_detect_monitor_audio(edid); 6133 drm_dp_cec_set_edid(&intel_dp->aux, edid); 6134 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 6135 } 6136 6137 static void 6138 intel_dp_unset_edid(struct intel_dp *intel_dp) 6139 { 6140 struct intel_connector *intel_connector = intel_dp->attached_connector; 6141 6142 drm_dp_cec_unset_edid(&intel_dp->aux); 6143 kfree(intel_connector->detect_edid); 6144 intel_connector->detect_edid = NULL; 6145 6146 intel_dp->has_audio = false; 6147 intel_dp->edid_quirks = 0; 6148 } 6149 6150 static int 6151 intel_dp_detect(struct drm_connector *connector, 6152 struct drm_modeset_acquire_ctx *ctx, 6153 bool force) 6154 { 6155 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6156 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6157 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6158 struct intel_encoder *encoder = &dig_port->base; 6159 enum drm_connector_status status; 6160 6161 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6162 connector->base.id, connector->name); 6163 drm_WARN_ON(&dev_priv->drm, 6164 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 6165 6166 /* Can't disconnect eDP */ 6167 if (intel_dp_is_edp(intel_dp)) 6168 status = edp_detect(intel_dp); 6169 else if (intel_digital_port_connected(encoder)) 6170 status = intel_dp_detect_dpcd(intel_dp); 6171 else 6172 status = connector_status_disconnected; 6173 6174 if (status == connector_status_disconnected) { 6175 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6176 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 6177 6178 if (intel_dp->is_mst) { 6179 drm_dbg_kms(&dev_priv->drm, 6180 "MST device may have disappeared %d vs %d\n", 6181 intel_dp->is_mst, 6182 intel_dp->mst_mgr.mst_state); 6183 intel_dp->is_mst = false; 6184 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6185 intel_dp->is_mst); 6186 } 6187 6188 goto out; 6189 } 6190 6191 if (intel_dp->reset_link_params) { 6192 /* Initial max link lane count */ 6193 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 6194 6195 /* Initial max link rate */ 6196 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 6197 6198 intel_dp->reset_link_params = false; 6199 } 6200 
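	/*
	 * Note: the maxima restored above are the common source/sink
	 * capabilities; link training may still end up falling back to a
	 * lower link rate or lane count than this.
	 */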
6201 intel_dp_print_rates(intel_dp); 6202 6203 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 6204 if (INTEL_GEN(dev_priv) >= 11) 6205 intel_dp_get_dsc_sink_cap(intel_dp); 6206 6207 intel_dp_configure_mst(intel_dp); 6208 6209 if (intel_dp->is_mst) { 6210 /* 6211 * If we are in MST mode then this connector 6212 * won't appear connected and won't have 6213 * an EDID on it 6214 */ 6215 status = connector_status_disconnected; 6216 goto out; 6217 } 6218 6219 /* 6220 * Some external monitors do not signal loss of link synchronization 6221 * with an IRQ_HPD, so force a link status check. 6222 */ 6223 if (!intel_dp_is_edp(intel_dp)) { 6224 int ret; 6225 6226 ret = intel_dp_retrain_link(encoder, ctx); 6227 if (ret) 6228 return ret; 6229 } 6230 6231 /* 6232 * Clear the NACK and DEFER counts so we get their exact values 6233 * while reading the EDID; compliance tests 6234 * 4.2.2.4 and 4.2.2.5 require them. 6235 */ 6236 intel_dp->aux.i2c_nack_count = 0; 6237 intel_dp->aux.i2c_defer_count = 0; 6238 6239 intel_dp_set_edid(intel_dp); 6240 if (intel_dp_is_edp(intel_dp) || 6241 to_intel_connector(connector)->detect_edid) 6242 status = connector_status_connected; 6243 6244 intel_dp_check_service_irq(intel_dp); 6245 6246 out: 6247 if (status != connector_status_connected && !intel_dp->is_mst) 6248 intel_dp_unset_edid(intel_dp); 6249 6250 /* 6251 * Make sure the refs for power wells enabled during detect are 6252 * dropped to avoid a new detect cycle triggered by HPD polling. 6253 */ 6254 intel_display_power_flush_work(dev_priv); 6255 6256 return status; 6257 } 6258 6259 static void 6260 intel_dp_force(struct drm_connector *connector) 6261 { 6262 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6263 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6264 struct intel_encoder *intel_encoder = &dig_port->base; 6265 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6266 enum intel_display_power_domain aux_domain = 6267 intel_aux_power_domain(dig_port); 6268 intel_wakeref_t wakeref; 6269 6270 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6271 connector->base.id, connector->name); 6272 intel_dp_unset_edid(intel_dp); 6273 6274 if (connector->status != connector_status_connected) 6275 return; 6276 6277 wakeref = intel_display_power_get(dev_priv, aux_domain); 6278 6279 intel_dp_set_edid(intel_dp); 6280 6281 intel_display_power_put(dev_priv, aux_domain, wakeref); 6282 } 6283 6284 static int intel_dp_get_modes(struct drm_connector *connector) 6285 { 6286 struct intel_connector *intel_connector = to_intel_connector(connector); 6287 struct edid *edid; 6288 6289 edid = intel_connector->detect_edid; 6290 if (edid) { 6291 int ret = intel_connector_update_modes(connector, edid); 6292 if (ret) 6293 return ret; 6294 } 6295 6296 /* if eDP has no EDID, fall back to fixed mode */ 6297 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 6298 intel_connector->panel.fixed_mode) { 6299 struct drm_display_mode *mode; 6300 6301 mode = drm_mode_duplicate(connector->dev, 6302 intel_connector->panel.fixed_mode); 6303 if (mode) { 6304 drm_mode_probed_add(connector, mode); 6305 return 1; 6306 } 6307 } 6308 6309 return 0; 6310 } 6311 6312 static int 6313 intel_dp_connector_register(struct drm_connector *connector) 6314 { 6315 struct drm_i915_private *i915 = to_i915(connector->dev); 6316 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6317 int ret; 6318 6319 ret = intel_connector_register(connector); 6320 if (ret) 6321
return ret; 6322 6323 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 6324 intel_dp->aux.name, connector->kdev->kobj.name); 6325 6326 intel_dp->aux.dev = connector->kdev; 6327 ret = drm_dp_aux_register(&intel_dp->aux); 6328 if (!ret) 6329 drm_dp_cec_register_connector(&intel_dp->aux, connector); 6330 return ret; 6331 } 6332 6333 static void 6334 intel_dp_connector_unregister(struct drm_connector *connector) 6335 { 6336 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6337 6338 drm_dp_cec_unregister_connector(&intel_dp->aux); 6339 drm_dp_aux_unregister(&intel_dp->aux); 6340 intel_connector_unregister(connector); 6341 } 6342 6343 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 6344 { 6345 struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 6346 struct intel_dp *intel_dp = &intel_dig_port->dp; 6347 6348 intel_dp_mst_encoder_cleanup(intel_dig_port); 6349 if (intel_dp_is_edp(intel_dp)) { 6350 intel_wakeref_t wakeref; 6351 6352 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6353 /* 6354 * vdd might still be enabled due to the delayed vdd off. 6355 * Make sure vdd is actually turned off here. 6356 */ 6357 with_pps_lock(intel_dp, wakeref) 6358 edp_panel_vdd_off_sync(intel_dp); 6359 6360 if (intel_dp->edp_notifier.notifier_call) { 6361 unregister_reboot_notifier(&intel_dp->edp_notifier); 6362 intel_dp->edp_notifier.notifier_call = NULL; 6363 } 6364 } 6365 6366 intel_dp_aux_fini(intel_dp); 6367 } 6368 6369 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 6370 { 6371 intel_dp_encoder_flush_work(encoder); 6372 6373 drm_encoder_cleanup(encoder); 6374 kfree(enc_to_dig_port(to_intel_encoder(encoder))); 6375 } 6376 6377 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 6378 { 6379 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 6380 intel_wakeref_t wakeref; 6381 6382 if (!intel_dp_is_edp(intel_dp)) 6383 return; 6384 6385 /* 6386 * vdd might still be enabled due to the delayed vdd off. 6387 * Make sure vdd is actually turned off here. 6388 */ 6389 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6390 with_pps_lock(intel_dp, wakeref) 6391 edp_panel_vdd_off_sync(intel_dp); 6392 } 6393 6394 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) 6395 { 6396 long ret; 6397 6398 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) 6399 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, 6400 msecs_to_jiffies(timeout)); 6401 6402 if (!ret) 6403 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n"); 6404 } 6405 6406 static 6407 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, 6408 u8 *an) 6409 { 6410 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6411 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); 6412 static const struct drm_dp_aux_msg msg = { 6413 .request = DP_AUX_NATIVE_WRITE, 6414 .address = DP_AUX_HDCP_AKSV, 6415 .size = DRM_HDCP_KSV_LEN, 6416 }; 6417 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; 6418 ssize_t dpcd_ret; 6419 int ret; 6420 6421 /* Output An first, that's easy */ 6422 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, 6423 an, DRM_HDCP_AN_LEN); 6424 if (dpcd_ret != DRM_HDCP_AN_LEN) { 6425 drm_dbg_kms(&i915->drm, 6426 "Failed to write An over DP/AUX (%zd)\n", 6427 dpcd_ret); 6428 return dpcd_ret >= 0 ?
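	       /*
		* Error convention used throughout these HDCP helpers: a
		* short transfer is mapped to -EIO, while a genuine
		* (negative) AUX error code is propagated unchanged.
		*/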
-EIO : dpcd_ret; 6429 } 6430 6431 /* 6432 * Since Aksv is Oh-So-Secret, we can't access it in software. So in 6433 * order to get it on the wire, we need to create the AUX header as if 6434 * we were writing the data, and then tickle the hardware to output the 6435 * data once the header is sent out. 6436 */ 6437 intel_dp_aux_header(txbuf, &msg); 6438 6439 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 6440 rxbuf, sizeof(rxbuf), 6441 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 6442 if (ret < 0) { 6443 drm_dbg_kms(&i915->drm, 6444 "Write Aksv over DP/AUX failed (%d)\n", ret); 6445 return ret; 6446 } else if (ret == 0) { 6447 drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n"); 6448 return -EIO; 6449 } 6450 6451 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 6452 if (reply != DP_AUX_NATIVE_REPLY_ACK) { 6453 drm_dbg_kms(&i915->drm, 6454 "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", 6455 reply); 6456 return -EIO; 6457 } 6458 return 0; 6459 } 6460 6461 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, 6462 u8 *bksv) 6463 { 6464 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6465 ssize_t ret; 6466 6467 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 6468 DRM_HDCP_KSV_LEN); 6469 if (ret != DRM_HDCP_KSV_LEN) { 6470 drm_dbg_kms(&i915->drm, 6471 "Read Bksv from DP/AUX failed (%zd)\n", ret); 6472 return ret >= 0 ? -EIO : ret; 6473 } 6474 return 0; 6475 } 6476 6477 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, 6478 u8 *bstatus) 6479 { 6480 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6481 ssize_t ret; 6482 6483 /* 6484 * For some reason the HDMI and DP HDCP specs call this register 6485 * definition by different names. In the HDMI spec, it's called BSTATUS, 6486 * but in DP it's called BINFO. 6487 */ 6488 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, 6489 bstatus, DRM_HDCP_BSTATUS_LEN); 6490 if (ret != DRM_HDCP_BSTATUS_LEN) { 6491 drm_dbg_kms(&i915->drm, 6492 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6493 return ret >= 0 ? -EIO : ret; 6494 } 6495 return 0; 6496 } 6497 6498 static 6499 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, 6500 u8 *bcaps) 6501 { 6502 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6503 ssize_t ret; 6504 6505 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6506 bcaps, 1); 6507 if (ret != 1) { 6508 drm_dbg_kms(&i915->drm, 6509 "Read bcaps from DP/AUX failed (%zd)\n", ret); 6510 return ret >= 0 ? -EIO : ret; 6511 } 6512 6513 return 0; 6514 } 6515 6516 static 6517 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, 6518 bool *repeater_present) 6519 { 6520 ssize_t ret; 6521 u8 bcaps; 6522 6523 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6524 if (ret) 6525 return ret; 6526 6527 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6528 return 0; 6529 } 6530 6531 static 6532 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, 6533 u8 *ri_prime) 6534 { 6535 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6536 ssize_t ret; 6537 6538 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6539 ri_prime, DRM_HDCP_RI_LEN); 6540 if (ret != DRM_HDCP_RI_LEN) { 6541 drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", 6542 ret); 6543 return ret >= 0 ? 
-EIO : ret; 6544 } 6545 return 0; 6546 } 6547 6548 static 6549 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, 6550 bool *ksv_ready) 6551 { 6552 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6553 ssize_t ret; 6554 u8 bstatus; 6555 6556 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6557 &bstatus, 1); 6558 if (ret != 1) { 6559 drm_dbg_kms(&i915->drm, 6560 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6561 return ret >= 0 ? -EIO : ret; 6562 } 6563 *ksv_ready = bstatus & DP_BSTATUS_READY; 6564 return 0; 6565 } 6566 6567 static 6568 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, 6569 int num_downstream, u8 *ksv_fifo) 6570 { 6571 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6572 ssize_t ret; 6573 int i; 6574 6575 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6576 for (i = 0; i < num_downstream; i += 3) { 6577 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6578 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6579 DP_AUX_HDCP_KSV_FIFO, 6580 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6581 len); 6582 if (ret != len) { 6583 drm_dbg_kms(&i915->drm, 6584 "Read ksv[%d] from DP/AUX failed (%zd)\n", 6585 i, ret); 6586 return ret >= 0 ? -EIO : ret; 6587 } 6588 } 6589 return 0; 6590 } 6591 6592 static 6593 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, 6594 int i, u32 *part) 6595 { 6596 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6597 ssize_t ret; 6598 6599 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6600 return -EINVAL; 6601 6602 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6603 DP_AUX_HDCP_V_PRIME(i), part, 6604 DRM_HDCP_V_PRIME_PART_LEN); 6605 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6606 drm_dbg_kms(&i915->drm, 6607 "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6608 return ret >= 0 ? 
-EIO : ret; 6609 } 6610 return 0; 6611 } 6612 6613 static 6614 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, 6615 bool enable) 6616 { 6617 /* Not used for single stream DisplayPort setups */ 6618 return 0; 6619 } 6620 6621 static 6622 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) 6623 { 6624 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6625 ssize_t ret; 6626 u8 bstatus; 6627 6628 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6629 &bstatus, 1); 6630 if (ret != 1) { 6631 drm_dbg_kms(&i915->drm, 6632 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6633 return false; 6634 } 6635 6636 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6637 } 6638 6639 static 6640 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, 6641 bool *hdcp_capable) 6642 { 6643 ssize_t ret; 6644 u8 bcaps; 6645 6646 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6647 if (ret) 6648 return ret; 6649 6650 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6651 return 0; 6652 } 6653 6654 struct hdcp2_dp_errata_stream_type { 6655 u8 msg_id; 6656 u8 stream_type; 6657 } __packed; 6658 6659 struct hdcp2_dp_msg_data { 6660 u8 msg_id; 6661 u32 offset; 6662 bool msg_detectable; 6663 u32 timeout; 6664 u32 timeout2; /* Used for the non-paired situation */ 6665 }; 6666 6667 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6668 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6669 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6670 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6671 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6672 false, 0, 0 }, 6673 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6674 false, 0, 0 }, 6675 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6676 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6677 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6678 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6679 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6680 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6681 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6682 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6683 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6684 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6685 0, 0 }, 6686 { HDCP_2_2_REP_SEND_RECVID_LIST, 6687 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6688 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6689 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6690 0, 0 }, 6691 { HDCP_2_2_REP_STREAM_MANAGE, 6692 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6693 0, 0 }, 6694 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6695 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6696 /* local define to shovel this through the write_2_2 interface */ 6697 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6698 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6699 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6700 0, 0 }, 6701 }; 6702 6703 static int 6704 intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, 6705 u8 *rx_status) 6706 { 6707 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6708 ssize_t ret; 6709 6710 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6711 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6712 HDCP_2_2_DP_RXSTATUS_LEN); 6713 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6714 drm_dbg_kms(&i915->drm, 6715 "Read rx_status from DP/AUX failed (%zd)\n", ret); 6716 return ret >= 0 ?
-EIO : ret; 6717 } 6718 6719 return 0; 6720 } 6721 6722 static 6723 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, 6724 u8 msg_id, bool *msg_ready) 6725 { 6726 u8 rx_status; 6727 int ret; 6728 6729 *msg_ready = false; 6730 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6731 if (ret < 0) 6732 return ret; 6733 6734 switch (msg_id) { 6735 case HDCP_2_2_AKE_SEND_HPRIME: 6736 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) 6737 *msg_ready = true; 6738 break; 6739 case HDCP_2_2_AKE_SEND_PAIRING_INFO: 6740 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) 6741 *msg_ready = true; 6742 break; 6743 case HDCP_2_2_REP_SEND_RECVID_LIST: 6744 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6745 *msg_ready = true; 6746 break; 6747 default: 6748 DRM_ERROR("Unidentified msg_id: %d\n", msg_id); 6749 return -EINVAL; 6750 } 6751 6752 return 0; 6753 } 6754 6755 static ssize_t 6756 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, 6757 const struct hdcp2_dp_msg_data *hdcp2_msg_data) 6758 { 6759 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6760 struct intel_dp *dp = &intel_dig_port->dp; 6761 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6762 u8 msg_id = hdcp2_msg_data->msg_id; 6763 int ret, timeout; 6764 bool msg_ready = false; 6765 6766 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) 6767 timeout = hdcp2_msg_data->timeout2; 6768 else 6769 timeout = hdcp2_msg_data->timeout; 6770 6771 /* 6772 * There is no way to detect the availability of CERT, LPRIME and 6773 * STREAM_READY, so wait for the timeout and then read the msg. 6774 */ 6775 if (!hdcp2_msg_data->msg_detectable) { 6776 mdelay(timeout); 6777 ret = 0; 6778 } else { 6779 /* 6780 * Since we want to check the msg availability at the timeout, 6781 * ignore the timeout of the CP_IRQ wait itself. 6782 */ 6783 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); 6784 ret = hdcp2_detect_msg_availability(intel_dig_port, 6785 msg_id, &msg_ready); 6786 if (!msg_ready) 6787 ret = -ETIMEDOUT; 6788 } 6789 6790 if (ret) 6791 drm_dbg_kms(&i915->drm, 6792 "msg_id %d, ret %d, timeout (ms): %d\n", 6793 hdcp2_msg_data->msg_id, ret, timeout); 6794 6795 return ret; 6796 } 6797 6798 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) 6799 { 6800 int i; 6801 6802 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) 6803 if (hdcp2_dp_msg_data[i].msg_id == msg_id) 6804 return &hdcp2_dp_msg_data[i]; 6805 6806 return NULL; 6807 } 6808 6809 static 6810 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, 6811 void *buf, size_t size) 6812 { 6813 struct intel_dp *dp = &intel_dig_port->dp; 6814 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6815 unsigned int offset; 6816 u8 *byte = buf; 6817 ssize_t ret, bytes_to_write, len; 6818 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6819 6820 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); 6821 if (!hdcp2_msg_data) 6822 return -EINVAL; 6823 6824 offset = hdcp2_msg_data->offset; 6825 6826 /* No msg_id in DP HDCP2.2 msgs */ 6827 bytes_to_write = size - 1; 6828 byte++; 6829 6830 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); 6831 6832 while (bytes_to_write) { 6833 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
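		/*
		 * A native AUX transfer carries at most
		 * DP_AUX_MAX_PAYLOAD_BYTES (16 bytes) of payload, so larger
		 * HDCP2.2 messages are written out in chunks.
		 */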
6834 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; 6835 6836 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, 6837 offset, (void *)byte, len); 6838 if (ret < 0) 6839 return ret; 6840 6841 bytes_to_write -= ret; 6842 byte += ret; 6843 offset += ret; 6844 } 6845 6846 return size; 6847 } 6848 6849 static 6850 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) 6851 { 6852 u8 rx_info[HDCP_2_2_RXINFO_LEN]; 6853 u32 dev_cnt; 6854 ssize_t ret; 6855 6856 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6857 DP_HDCP_2_2_REG_RXINFO_OFFSET, 6858 (void *)rx_info, HDCP_2_2_RXINFO_LEN); 6859 if (ret != HDCP_2_2_RXINFO_LEN) 6860 return ret >= 0 ? -EIO : ret; 6861 6862 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 6863 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 6864 6865 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) 6866 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; 6867 6868 ret = sizeof(struct hdcp2_rep_send_receiverid_list) - 6869 HDCP_2_2_RECEIVER_IDS_MAX_LEN + 6870 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); 6871 6872 return ret; 6873 } 6874 6875 static 6876 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, 6877 u8 msg_id, void *buf, size_t size) 6878 { 6879 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6880 unsigned int offset; 6881 u8 *byte = buf; 6882 ssize_t ret, bytes_to_recv, len; 6883 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6884 6885 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); 6886 if (!hdcp2_msg_data) 6887 return -EINVAL; 6888 offset = hdcp2_msg_data->offset; 6889 6890 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); 6891 if (ret < 0) 6892 return ret; 6893 6894 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { 6895 ret = get_receiver_id_list_size(intel_dig_port); 6896 if (ret < 0) 6897 return ret; 6898 6899 size = ret; 6900 } 6901 bytes_to_recv = size - 1; 6902 6903 /* DP adaptation msgs have no msg_id */ 6904 byte++; 6905 6906 while (bytes_to_recv) { 6907 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 6908 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; 6909 6910 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, 6911 (void *)byte, len); 6912 if (ret < 0) { 6913 drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", 6914 msg_id, ret); 6915 return ret; 6916 } 6917 6918 bytes_to_recv -= ret; 6919 byte += ret; 6920 offset += ret; 6921 } 6922 byte = buf; 6923 *byte = msg_id; 6924 6925 return size; 6926 } 6927 6928 static 6929 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, 6930 bool is_repeater, u8 content_type) 6931 { 6932 int ret; 6933 struct hdcp2_dp_errata_stream_type stream_type_msg; 6934 6935 if (is_repeater) 6936 return 0; 6937 6938 /* 6939 * Errata for DP: since the stream type is used for encryption, the 6940 * receiver must be told the stream type so it can decrypt the 6941 * content. 6942 * A repeater is told the stream type as part of its 6943 * authentication later in time. 6944 */ 6945 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; 6946 stream_type_msg.stream_type = content_type; 6947 6948 ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, 6949 sizeof(stream_type_msg)); 6950 6951 return ret < 0 ?
ret : 0; 6952 6953 } 6954 6955 static 6956 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) 6957 { 6958 u8 rx_status; 6959 int ret; 6960 6961 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6962 if (ret) 6963 return ret; 6964 6965 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) 6966 ret = HDCP_REAUTH_REQUEST; 6967 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) 6968 ret = HDCP_LINK_INTEGRITY_FAILURE; 6969 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6970 ret = HDCP_TOPOLOGY_CHANGE; 6971 6972 return ret; 6973 } 6974 6975 static 6976 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, 6977 bool *capable) 6978 { 6979 u8 rx_caps[3]; 6980 int ret; 6981 6982 *capable = false; 6983 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6984 DP_HDCP_2_2_REG_RX_CAPS_OFFSET, 6985 rx_caps, HDCP_2_2_RXCAPS_LEN); 6986 if (ret != HDCP_2_2_RXCAPS_LEN) 6987 return ret >= 0 ? -EIO : ret; 6988 6989 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6990 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6991 *capable = true; 6992 6993 return 0; 6994 } 6995 6996 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 6997 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 6998 .read_bksv = intel_dp_hdcp_read_bksv, 6999 .read_bstatus = intel_dp_hdcp_read_bstatus, 7000 .repeater_present = intel_dp_hdcp_repeater_present, 7001 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 7002 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 7003 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 7004 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 7005 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 7006 .check_link = intel_dp_hdcp_check_link, 7007 .hdcp_capable = intel_dp_hdcp_capable, 7008 .write_2_2_msg = intel_dp_hdcp2_write_msg, 7009 .read_2_2_msg = intel_dp_hdcp2_read_msg, 7010 .config_stream_type = intel_dp_hdcp2_config_stream_type, 7011 .check_2_2_link = intel_dp_hdcp2_check_link, 7012 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 7013 .protocol = HDCP_PROTOCOL_DP, 7014 }; 7015 7016 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 7017 { 7018 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7019 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7020 7021 lockdep_assert_held(&dev_priv->pps_mutex); 7022 7023 if (!edp_have_panel_vdd(intel_dp)) 7024 return; 7025 7026 /* 7027 * The VDD bit needs a power domain reference, so if the bit is 7028 * already enabled when we boot or resume, grab this reference and 7029 * schedule a vdd off, so we don't hold on to the reference 7030 * indefinitely. 
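 * (The reference is released again once the vdd-off work queued by
 * edp_panel_vdd_schedule_off() below has actually run.)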
7031 */ 7032 drm_dbg_kms(&dev_priv->drm, 7033 "VDD left on by BIOS, adjusting state tracking\n"); 7034 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 7035 7036 edp_panel_vdd_schedule_off(intel_dp); 7037 } 7038 7039 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 7040 { 7041 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7042 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 7043 enum pipe pipe; 7044 7045 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 7046 encoder->port, &pipe)) 7047 return pipe; 7048 7049 return INVALID_PIPE; 7050 } 7051 7052 void intel_dp_encoder_reset(struct drm_encoder *encoder) 7053 { 7054 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 7055 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 7056 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 7057 intel_wakeref_t wakeref; 7058 7059 if (!HAS_DDI(dev_priv)) 7060 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 7061 7062 if (lspcon->active) 7063 lspcon_resume(lspcon); 7064 7065 intel_dp->reset_link_params = true; 7066 7067 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 7068 !intel_dp_is_edp(intel_dp)) 7069 return; 7070 7071 with_pps_lock(intel_dp, wakeref) { 7072 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7073 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 7074 7075 if (intel_dp_is_edp(intel_dp)) { 7076 /* 7077 * Reinit the power sequencer, in case BIOS did 7078 * something nasty with it. 7079 */ 7080 intel_dp_pps_init(intel_dp); 7081 intel_edp_panel_vdd_sanitize(intel_dp); 7082 } 7083 } 7084 } 7085 7086 static int intel_modeset_tile_group(struct intel_atomic_state *state, 7087 int tile_group_id) 7088 { 7089 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7090 struct drm_connector_list_iter conn_iter; 7091 struct drm_connector *connector; 7092 int ret = 0; 7093 7094 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 7095 drm_for_each_connector_iter(connector, &conn_iter) { 7096 struct drm_connector_state *conn_state; 7097 struct intel_crtc_state *crtc_state; 7098 struct intel_crtc *crtc; 7099 7100 if (!connector->has_tile || 7101 connector->tile_group->id != tile_group_id) 7102 continue; 7103 7104 conn_state = drm_atomic_get_connector_state(&state->base, 7105 connector); 7106 if (IS_ERR(conn_state)) { 7107 ret = PTR_ERR(conn_state); 7108 break; 7109 } 7110 7111 crtc = to_intel_crtc(conn_state->crtc); 7112 7113 if (!crtc) 7114 continue; 7115 7116 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7117 crtc_state->uapi.mode_changed = true; 7118 7119 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7120 if (ret) 7121 break; 7122 } 7123 drm_connector_list_iter_end(&conn_iter); 7124 7125 return ret; 7126 } 7127 7128 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 7129 { 7130 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7131 struct intel_crtc *crtc; 7132 7133 if (transcoders == 0) 7134 return 0; 7135 7136 for_each_intel_crtc(&dev_priv->drm, crtc) { 7137 struct intel_crtc_state *crtc_state; 7138 int ret; 7139 7140 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7141 if (IS_ERR(crtc_state)) 7142 return PTR_ERR(crtc_state); 7143 7144 if (!crtc_state->hw.enable) 7145 continue; 7146 7147 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 7148 continue; 7149 7150 crtc_state->uapi.mode_changed = true; 7151 7152 ret = 
drm_atomic_add_affected_connectors(&state->base, &crtc->base); 7153 if (ret) 7154 return ret; 7155 7156 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7157 if (ret) 7158 return ret; 7159 7160 transcoders &= ~BIT(crtc_state->cpu_transcoder); 7161 } 7162 7163 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 7164 7165 return 0; 7166 } 7167 7168 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 7169 struct drm_connector *connector) 7170 { 7171 const struct drm_connector_state *old_conn_state = 7172 drm_atomic_get_old_connector_state(&state->base, connector); 7173 const struct intel_crtc_state *old_crtc_state; 7174 struct intel_crtc *crtc; 7175 u8 transcoders; 7176 7177 crtc = to_intel_crtc(old_conn_state->crtc); 7178 if (!crtc) 7179 return 0; 7180 7181 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 7182 7183 if (!old_crtc_state->hw.active) 7184 return 0; 7185 7186 transcoders = old_crtc_state->sync_mode_slaves_mask; 7187 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 7188 transcoders |= BIT(old_crtc_state->master_transcoder); 7189 7190 return intel_modeset_affected_transcoders(state, 7191 transcoders); 7192 } 7193 7194 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 7195 struct drm_atomic_state *_state) 7196 { 7197 struct drm_i915_private *dev_priv = to_i915(conn->dev); 7198 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7199 int ret; 7200 7201 ret = intel_digital_connector_atomic_check(conn, &state->base); 7202 if (ret) 7203 return ret; 7204 7205 /* 7206 * We don't enable port sync on BDW due to missing w/as and 7207 * due to not having adjusted the modeset sequence appropriately. 7208 */ 7209 if (INTEL_GEN(dev_priv) < 9) 7210 return 0; 7211 7212 if (!intel_connector_needs_modeset(state, conn)) 7213 return 0; 7214 7215 if (conn->has_tile) { 7216 ret = intel_modeset_tile_group(state, conn->tile_group->id); 7217 if (ret) 7218 return ret; 7219 } 7220 7221 return intel_modeset_synced_crtcs(state, conn); 7222 } 7223 7224 static const struct drm_connector_funcs intel_dp_connector_funcs = { 7225 .force = intel_dp_force, 7226 .fill_modes = drm_helper_probe_single_connector_modes, 7227 .atomic_get_property = intel_digital_connector_atomic_get_property, 7228 .atomic_set_property = intel_digital_connector_atomic_set_property, 7229 .late_register = intel_dp_connector_register, 7230 .early_unregister = intel_dp_connector_unregister, 7231 .destroy = intel_connector_destroy, 7232 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7233 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 7234 }; 7235 7236 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 7237 .detect_ctx = intel_dp_detect, 7238 .get_modes = intel_dp_get_modes, 7239 .mode_valid = intel_dp_mode_valid, 7240 .atomic_check = intel_dp_connector_atomic_check, 7241 }; 7242 7243 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 7244 .reset = intel_dp_encoder_reset, 7245 .destroy = intel_dp_encoder_destroy, 7246 }; 7247 7248 static bool intel_edp_have_power(struct intel_dp *intel_dp) 7249 { 7250 intel_wakeref_t wakeref; 7251 bool have_power = false; 7252 7253 with_pps_lock(intel_dp, wakeref) { 7254 have_power = edp_have_panel_power(intel_dp) && 7255 edp_have_panel_vdd(intel_dp); 7256 } 7257 7258 return have_power; 7259 } 7260 7261 enum irqreturn 7262 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 7263 { 7264 struct drm_i915_private *i915 = 
to_i915(intel_dig_port->base.base.dev); 7265 struct intel_dp *intel_dp = &intel_dig_port->dp; 7266 7267 if (intel_dig_port->base.type == INTEL_OUTPUT_EDP && 7268 (long_hpd || !intel_edp_have_power(intel_dp))) { 7269 /* 7270 * vdd off can generate a long/short pulse on eDP which 7271 * would require vdd on to handle it, and thus we 7272 * would end up in an endless cycle of 7273 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 7274 */ 7275 drm_dbg_kms(&i915->drm, 7276 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 7277 long_hpd ? "long" : "short", 7278 intel_dig_port->base.base.base.id, 7279 intel_dig_port->base.base.name); 7280 return IRQ_HANDLED; 7281 } 7282 7283 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 7284 intel_dig_port->base.base.base.id, 7285 intel_dig_port->base.base.name, 7286 long_hpd ? "long" : "short"); 7287 7288 if (long_hpd) { 7289 intel_dp->reset_link_params = true; 7290 return IRQ_NONE; 7291 } 7292 7293 if (intel_dp->is_mst) { 7294 switch (intel_dp_check_mst_status(intel_dp)) { 7295 case -EINVAL: 7296 /* 7297 * If we were in MST mode, and the device is not 7298 * there, get out of MST mode 7299 */ 7300 drm_dbg_kms(&i915->drm, 7301 "MST device may have disappeared %d vs %d\n", 7302 intel_dp->is_mst, 7303 intel_dp->mst_mgr.mst_state); 7304 intel_dp->is_mst = false; 7305 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 7306 intel_dp->is_mst); 7307 7308 return IRQ_NONE; 7309 case 1: 7310 return IRQ_NONE; 7311 default: 7312 break; 7313 } 7314 } 7315 7316 if (!intel_dp->is_mst) { 7317 bool handled; 7318 7319 handled = intel_dp_short_pulse(intel_dp); 7320 7321 if (!handled) 7322 return IRQ_NONE; 7323 } 7324 7325 return IRQ_HANDLED; 7326 } 7327 7328 /* check the VBT to see whether the eDP is on another port */ 7329 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 7330 { 7331 /* 7332 * eDP is not supported on g4x, so bail out early just 7333 * for a bit of extra safety in case the VBT is bonkers.
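 *
 * To summarize the checks below: gen < 5 never has eDP, on gen5-8
 * port A is implicitly eDP, and otherwise the VBT decides.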
7334 */ 7335 if (INTEL_GEN(dev_priv) < 5) 7336 return false; 7337 7338 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 7339 return true; 7340 7341 return intel_bios_is_port_edp(dev_priv, port); 7342 } 7343 7344 static void 7345 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 7346 { 7347 struct drm_i915_private *dev_priv = to_i915(connector->dev); 7348 enum port port = dp_to_dig_port(intel_dp)->base.port; 7349 7350 if (!IS_G4X(dev_priv) && port != PORT_A) 7351 intel_attach_force_audio_property(connector); 7352 7353 intel_attach_broadcast_rgb_property(connector); 7354 if (HAS_GMCH(dev_priv)) 7355 drm_connector_attach_max_bpc_property(connector, 6, 10); 7356 else if (INTEL_GEN(dev_priv) >= 5) 7357 drm_connector_attach_max_bpc_property(connector, 6, 12); 7358 7359 intel_attach_colorspace_property(connector); 7360 7361 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 7362 drm_object_attach_property(&connector->base, 7363 connector->dev->mode_config.hdr_output_metadata_property, 7364 0); 7365 7366 if (intel_dp_is_edp(intel_dp)) { 7367 u32 allowed_scalers; 7368 7369 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 7370 if (!HAS_GMCH(dev_priv)) 7371 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 7372 7373 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 7374 7375 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 7376 7377 } 7378 } 7379 7380 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 7381 { 7382 intel_dp->panel_power_off_time = ktime_get_boottime(); 7383 intel_dp->last_power_on = jiffies; 7384 intel_dp->last_backlight_off = jiffies; 7385 } 7386 7387 static void 7388 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 7389 { 7390 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7391 u32 pp_on, pp_off, pp_ctl; 7392 struct pps_registers regs; 7393 7394 intel_pps_get_registers(intel_dp, &regs); 7395 7396 pp_ctl = ilk_get_pp_control(intel_dp); 7397 7398 /* Ensure PPS is unlocked */ 7399 if (!HAS_DDI(dev_priv)) 7400 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7401 7402 pp_on = intel_de_read(dev_priv, regs.pp_on); 7403 pp_off = intel_de_read(dev_priv, regs.pp_off); 7404 7405 /* Pull timing values out of registers */ 7406 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 7407 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 7408 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 7409 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 7410 7411 if (i915_mmio_reg_valid(regs.pp_div)) { 7412 u32 pp_div; 7413 7414 pp_div = intel_de_read(dev_priv, regs.pp_div); 7415 7416 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 7417 } else { 7418 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 7419 } 7420 } 7421 7422 static void 7423 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 7424 { 7425 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 7426 state_name, 7427 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 7428 } 7429 7430 static void 7431 intel_pps_verify_state(struct intel_dp *intel_dp) 7432 { 7433 struct edp_power_seq hw; 7434 struct edp_power_seq *sw = &intel_dp->pps_delays; 7435 7436 intel_pps_readout_hw_state(intel_dp, &hw); 7437 7438 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 7439 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 7440 DRM_ERROR("PPS state mismatch\n"); 7441
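		/* Dump both the sw expectation and the hw readout so the
		 * mismatching field is easy to spot in the log. */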
intel_pps_dump_state("sw", sw); 7442 intel_pps_dump_state("hw", &hw); 7443 } 7444 } 7445 7446 static void 7447 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 7448 { 7449 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7450 struct edp_power_seq cur, vbt, spec, 7451 *final = &intel_dp->pps_delays; 7452 7453 lockdep_assert_held(&dev_priv->pps_mutex); 7454 7455 /* already initialized? */ 7456 if (final->t11_t12 != 0) 7457 return; 7458 7459 intel_pps_readout_hw_state(intel_dp, &cur); 7460 7461 intel_pps_dump_state("cur", &cur); 7462 7463 vbt = dev_priv->vbt.edp.pps; 7464 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay 7465 * of 500ms appears to be too short. Ocassionally the panel 7466 * just fails to power back on. Increasing the delay to 800ms 7467 * seems sufficient to avoid this problem. 7468 */ 7469 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 7470 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 7471 drm_dbg_kms(&dev_priv->drm, 7472 "Increasing T12 panel delay as per the quirk to %d\n", 7473 vbt.t11_t12); 7474 } 7475 /* T11_T12 delay is special and actually in units of 100ms, but zero 7476 * based in the hw (so we need to add 100 ms). But the sw vbt 7477 * table multiplies it with 1000 to make it in units of 100usec, 7478 * too. */ 7479 vbt.t11_t12 += 100 * 10; 7480 7481 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 7482 * our hw here, which are all in 100usec. */ 7483 spec.t1_t3 = 210 * 10; 7484 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 7485 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 7486 spec.t10 = 500 * 10; 7487 /* This one is special and actually in units of 100ms, but zero 7488 * based in the hw (so we need to add 100 ms). But the sw vbt 7489 * table multiplies it with 1000 to make it in units of 100usec, 7490 * too. */ 7491 spec.t11_t12 = (510 + 100) * 10; 7492 7493 intel_pps_dump_state("vbt", &vbt); 7494 7495 /* Use the max of the register settings and vbt. If both are 7496 * unset, fall back to the spec limits. */ 7497 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ 7498 spec.field : \ 7499 max(cur.field, vbt.field)) 7500 assign_final(t1_t3); 7501 assign_final(t8); 7502 assign_final(t9); 7503 assign_final(t10); 7504 assign_final(t11_t12); 7505 #undef assign_final 7506 7507 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 7508 intel_dp->panel_power_up_delay = get_delay(t1_t3); 7509 intel_dp->backlight_on_delay = get_delay(t8); 7510 intel_dp->backlight_off_delay = get_delay(t9); 7511 intel_dp->panel_power_down_delay = get_delay(t10); 7512 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 7513 #undef get_delay 7514 7515 drm_dbg_kms(&dev_priv->drm, 7516 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 7517 intel_dp->panel_power_up_delay, 7518 intel_dp->panel_power_down_delay, 7519 intel_dp->panel_power_cycle_delay); 7520 7521 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", 7522 intel_dp->backlight_on_delay, 7523 intel_dp->backlight_off_delay); 7524 7525 /* 7526 * We override the HW backlight delays to 1 because we do manual waits 7527 * on them. For T8, even BSpec recommends doing it. For T9, if we 7528 * don't do this, we'll end up waiting for the backlight off delay 7529 * twice: once when we do the manual sleep, and once when we disable 7530 * the panel and wait for the PP_STATUS bit to become zero. 
7531 */ 7532 final->t8 = 1; 7533 final->t9 = 1; 7534 7535 /* 7536 * HW has only a 100msec granularity for t11_t12 so round it up 7537 * accordingly. 7538 */ 7539 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 7540 } 7541 7542 static void 7543 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 7544 bool force_disable_vdd) 7545 { 7546 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7547 u32 pp_on, pp_off, port_sel = 0; 7548 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; 7549 struct pps_registers regs; 7550 enum port port = dp_to_dig_port(intel_dp)->base.port; 7551 const struct edp_power_seq *seq = &intel_dp->pps_delays; 7552 7553 lockdep_assert_held(&dev_priv->pps_mutex); 7554 7555 intel_pps_get_registers(intel_dp, &regs); 7556 7557 /* 7558 * On some VLV machines the BIOS can leave the VDD 7559 * enabled even on power sequencers which aren't 7560 * hooked up to any port. This would mess up the 7561 * power domain tracking the first time we pick 7562 * one of these power sequencers for use since 7563 * edp_panel_vdd_on() would notice that the VDD was 7564 * already on and therefore wouldn't grab the power 7565 * domain reference. Disable VDD first to avoid this. 7566 * This also avoids spuriously turning the VDD on as 7567 * soon as the new power sequencer gets initialized. 7568 */ 7569 if (force_disable_vdd) { 7570 u32 pp = ilk_get_pp_control(intel_dp); 7571 7572 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, 7573 "Panel power already on\n"); 7574 7575 if (pp & EDP_FORCE_VDD) 7576 drm_dbg_kms(&dev_priv->drm, 7577 "VDD already on, disabling first\n"); 7578 7579 pp &= ~EDP_FORCE_VDD; 7580 7581 intel_de_write(dev_priv, regs.pp_ctrl, pp); 7582 } 7583 7584 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 7585 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 7586 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 7587 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 7588 7589 /* Haswell doesn't have any port selection bits for the panel 7590 * power sequencer any more. */ 7591 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7592 port_sel = PANEL_PORT_SELECT_VLV(port); 7593 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7594 switch (port) { 7595 case PORT_A: 7596 port_sel = PANEL_PORT_SELECT_DPA; 7597 break; 7598 case PORT_C: 7599 port_sel = PANEL_PORT_SELECT_DPC; 7600 break; 7601 case PORT_D: 7602 port_sel = PANEL_PORT_SELECT_DPD; 7603 break; 7604 default: 7605 MISSING_CASE(port); 7606 break; 7607 } 7608 } 7609 7610 pp_on |= port_sel; 7611 7612 intel_de_write(dev_priv, regs.pp_on, pp_on); 7613 intel_de_write(dev_priv, regs.pp_off, pp_off); 7614 7615 /* 7616 * Compute the divisor for the pp clock; simply match the Bspec formula. 7617 */ 7618 if (i915_mmio_reg_valid(regs.pp_div)) { 7619 intel_de_write(dev_priv, regs.pp_div, 7620 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 7621 } else { 7622 u32 pp_ctl; 7623 7624 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 7625 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 7626 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 7627 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7628 } 7629 7630 drm_dbg_kms(&dev_priv->drm, 7631 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 7632 intel_de_read(dev_priv, regs.pp_on), 7633 intel_de_read(dev_priv, regs.pp_off), 7634 i915_mmio_reg_valid(regs.pp_div) ?
intel_de_read(dev_priv, regs.pp_div) : 7636 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 7637 } 7638 7639 static void intel_dp_pps_init(struct intel_dp *intel_dp) 7640 { 7641 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7642 7643 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7644 vlv_initial_power_sequencer_setup(intel_dp); 7645 } else { 7646 intel_dp_init_panel_power_sequencer(intel_dp); 7647 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 7648 } 7649 } 7650 7651 /** 7652 * intel_dp_set_drrs_state - program registers for RR switch to take effect 7653 * @dev_priv: i915 device 7654 * @crtc_state: a pointer to the active intel_crtc_state 7655 * @refresh_rate: RR to be programmed 7656 * 7657 * This function gets called when the refresh rate (RR) has to be changed from 7658 * one frequency to another. Switches can be between the high and low RR 7659 * supported by the panel, or to any other RR based on media playback (in 7660 * that case, the RR value needs to be passed from user space). 7661 * 7662 * The caller of this function needs to hold dev_priv->drrs.mutex. 7663 */ 7664 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 7665 const struct intel_crtc_state *crtc_state, 7666 int refresh_rate) 7667 { 7668 struct intel_dp *intel_dp = dev_priv->drrs.dp; 7669 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 7670 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7671 7672 if (refresh_rate <= 0) { 7673 drm_dbg_kms(&dev_priv->drm, 7674 "Refresh rate should be positive non-zero.\n"); 7675 return; 7676 } 7677 7678 if (intel_dp == NULL) { 7679 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 7680 return; 7681 } 7682 7683 if (!intel_crtc) { 7684 drm_dbg_kms(&dev_priv->drm, 7685 "DRRS: intel_crtc not initialized\n"); 7686 return; 7687 } 7688 7689 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7690 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 7691 return; 7692 } 7693 7694 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == 7695 refresh_rate) 7696 index = DRRS_LOW_RR; 7697 7698 if (index == dev_priv->drrs.refresh_rate_type) { 7699 drm_dbg_kms(&dev_priv->drm, 7700 "DRRS requested for previously set RR...ignoring\n"); 7701 return; 7702 } 7703 7704 if (!crtc_state->hw.active) { 7705 drm_dbg_kms(&dev_priv->drm, 7706 "eDP encoder disabled.
CRTC not active\n"); 7707 return; 7708 } 7709 7710 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7711 switch (index) { 7712 case DRRS_HIGH_RR: 7713 intel_dp_set_m_n(crtc_state, M1_N1); 7714 break; 7715 case DRRS_LOW_RR: 7716 intel_dp_set_m_n(crtc_state, M2_N2); 7717 break; 7718 case DRRS_MAX_RR: 7719 default: 7720 drm_err(&dev_priv->drm, 7721 "Unsupported refresh rate type\n"); 7722 } 7723 } else if (INTEL_GEN(dev_priv) > 6) { 7724 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7725 u32 val; 7726 7727 val = intel_de_read(dev_priv, reg); 7728 if (index > DRRS_HIGH_RR) { 7729 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7730 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7731 else 7732 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7733 } else { 7734 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7735 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7736 else 7737 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7738 } 7739 intel_de_write(dev_priv, reg, val); 7740 } 7741 7742 dev_priv->drrs.refresh_rate_type = index; 7743 7744 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to %dHz\n", 7745 refresh_rate); 7746 } 7747 7748 /** 7749 * intel_edp_drrs_enable - init drrs struct if supported 7750 * @intel_dp: DP struct 7751 * @crtc_state: A pointer to the active crtc state. 7752 * 7753 * Initializes frontbuffer_bits and drrs.dp 7754 */ 7755 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 7756 const struct intel_crtc_state *crtc_state) 7757 { 7758 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7759 7760 if (!crtc_state->has_drrs) { 7761 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); 7762 return; 7763 } 7764 7765 if (dev_priv->psr.enabled) { 7766 drm_dbg_kms(&dev_priv->drm, 7767 "PSR enabled. Not enabling DRRS.\n"); 7768 return; 7769 } 7770 7771 mutex_lock(&dev_priv->drrs.mutex); 7772 if (dev_priv->drrs.dp) { 7773 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); 7774 goto unlock; 7775 } 7776 7777 dev_priv->drrs.busy_frontbuffer_bits = 0; 7778 7779 dev_priv->drrs.dp = intel_dp; 7780 7781 unlock: 7782 mutex_unlock(&dev_priv->drrs.mutex); 7783 } 7784 7785 /** 7786 * intel_edp_drrs_disable - Disable DRRS 7787 * @intel_dp: DP struct 7788 * @old_crtc_state: Pointer to old crtc_state. 7789 * 7790 */ 7791 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 7792 const struct intel_crtc_state *old_crtc_state) 7793 { 7794 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7795 7796 if (!old_crtc_state->has_drrs) 7797 return; 7798 7799 mutex_lock(&dev_priv->drrs.mutex); 7800 if (!dev_priv->drrs.dp) { 7801 mutex_unlock(&dev_priv->drrs.mutex); 7802 return; 7803 } 7804 7805 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7806 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 7807 intel_dp->attached_connector->panel.fixed_mode->vrefresh); 7808 7809 dev_priv->drrs.dp = NULL; 7810 mutex_unlock(&dev_priv->drrs.mutex); 7811 7812 cancel_delayed_work_sync(&dev_priv->drrs.work); 7813 } 7814 7815 static void intel_edp_drrs_downclock_work(struct work_struct *work) 7816 { 7817 struct drm_i915_private *dev_priv = 7818 container_of(work, typeof(*dev_priv), drrs.work.work); 7819 struct intel_dp *intel_dp; 7820 7821 mutex_lock(&dev_priv->drrs.mutex); 7822 7823 intel_dp = dev_priv->drrs.dp; 7824 7825 if (!intel_dp) 7826 goto unlock; 7827 7828 /* 7829 * The delayed work can race with an invalidate, hence we need to 7830 * recheck.
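	 * Recheck busy_frontbuffer_bits under drrs.mutex: if new frontbuffer
	 * activity arrived after this work was scheduled, bail out instead
	 * of downclocking.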
7831 */ 7832 7833 if (dev_priv->drrs.busy_frontbuffer_bits) 7834 goto unlock; 7835 7836 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 7837 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7838 7839 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7840 intel_dp->attached_connector->panel.downclock_mode->vrefresh); 7841 } 7842 7843 unlock: 7844 mutex_unlock(&dev_priv->drrs.mutex); 7845 } 7846 7847 /** 7848 * intel_edp_drrs_invalidate - Disable Idleness DRRS 7849 * @dev_priv: i915 device 7850 * @frontbuffer_bits: frontbuffer plane tracking bits 7851 * 7852 * This function gets called every time rendering on the given planes starts. 7853 * Hence DRRS needs to be upclocked, i.e. switched from LOW_RR to HIGH_RR. 7854 * 7855 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7856 */ 7857 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 7858 unsigned int frontbuffer_bits) 7859 { 7860 struct drm_crtc *crtc; 7861 enum pipe pipe; 7862 7863 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7864 return; 7865 7866 cancel_delayed_work(&dev_priv->drrs.work); 7867 7868 mutex_lock(&dev_priv->drrs.mutex); 7869 if (!dev_priv->drrs.dp) { 7870 mutex_unlock(&dev_priv->drrs.mutex); 7871 return; 7872 } 7873 7874 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7875 pipe = to_intel_crtc(crtc)->pipe; 7876 7877 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7878 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 7879 7880 /* invalidate means busy screen hence upclock */ 7881 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7882 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7883 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7884 7885 mutex_unlock(&dev_priv->drrs.mutex); 7886 } 7887 7888 /** 7889 * intel_edp_drrs_flush - Restart Idleness DRRS 7890 * @dev_priv: i915 device 7891 * @frontbuffer_bits: frontbuffer plane tracking bits 7892 * 7893 * This function gets called every time rendering on the given planes has 7894 * completed or a flip on a crtc has completed. So DRRS should be upclocked 7895 * (LOW_RR -> HIGH_RR). Idleness detection is also started again if no other 7896 * planes are dirty. 7897 * 7898 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
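 *
 * A minimal usage sketch (hypothetical caller; the real hooks live in the
 * frontbuffer tracking code):
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	(rendering to the frontbuffer happens here)
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);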
7899 */ 7900 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 7901 unsigned int frontbuffer_bits) 7902 { 7903 struct drm_crtc *crtc; 7904 enum pipe pipe; 7905 7906 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7907 return; 7908 7909 cancel_delayed_work(&dev_priv->drrs.work); 7910 7911 mutex_lock(&dev_priv->drrs.mutex); 7912 if (!dev_priv->drrs.dp) { 7913 mutex_unlock(&dev_priv->drrs.mutex); 7914 return; 7915 } 7916 7917 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7918 pipe = to_intel_crtc(crtc)->pipe; 7919 7920 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7921 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 7922 7923 /* flush means busy screen hence upclock */ 7924 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7925 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7926 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7927 7928 /* 7929 * flush also means no more activity hence schedule downclock, if all 7930 * other fbs are quiescent too 7931 */ 7932 if (!dev_priv->drrs.busy_frontbuffer_bits) 7933 schedule_delayed_work(&dev_priv->drrs.work, 7934 msecs_to_jiffies(1000)); 7935 mutex_unlock(&dev_priv->drrs.mutex); 7936 } 7937 7938 /** 7939 * DOC: Display Refresh Rate Switching (DRRS) 7940 * 7941 * Display Refresh Rate Switching (DRRS) is a power conservation feature 7942 * which enables switching between low and high refresh rates dynamically, 7943 * based on the usage scenario. This feature is applicable 7944 * for internal panels. 7945 * 7946 * Indication that the panel supports DRRS is given by the panel EDID, which 7947 * would list multiple refresh rates for one resolution. 7948 * 7949 * DRRS is of 2 types - static and seamless. 7950 * Static DRRS involves changing the refresh rate (RR) by doing a full modeset 7951 * (may appear as a blink on screen) and is used in dock-undock scenarios. 7952 * Seamless DRRS involves changing the RR without any visual effect to the user 7953 * and can be used during normal system usage. This is done by programming 7954 * certain registers. 7955 * 7956 * Support for static/seamless DRRS may be indicated in the VBT based on 7957 * inputs from the panel spec. 7958 * 7959 * DRRS saves power by switching to a low RR based on usage scenarios. 7960 * 7961 * The implementation is based on frontbuffer tracking. When 7962 * there is a disturbance on the screen triggered by user activity or a periodic 7963 * system activity, DRRS is disabled (RR is changed to high RR). When there is 7964 * no movement on screen, after a timeout of 1 second, a switch to low RR is 7965 * made. 7966 * 7967 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 7968 * and intel_edp_drrs_flush() are called. 7969 * 7970 * DRRS can be further extended to support other internal panels and also 7971 * the scenario of video playback wherein the RR is set based on the rate 7972 * requested by userspace. 7973 */ 7974 7975 /** 7976 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7977 * @connector: eDP connector 7978 * @fixed_mode: preferred mode of panel 7979 * 7980 * This function is called only once at driver load to initialize basic 7981 * DRRS state. 7982 * 7983 * Returns: 7984 * Downclock mode if the panel supports it, else NULL. 7985 * DRRS support is determined by the presence of the downclock mode (apart 7986 * from the VBT setting).
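 *
 * The returned mode is intended to be used as the connector's downclock
 * mode, e.g. from intel_edp_init_connector():
 *
 *	downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);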
7987 */ 7988 static struct drm_display_mode * 7989 intel_dp_drrs_init(struct intel_connector *connector, 7990 struct drm_display_mode *fixed_mode) 7991 { 7992 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 7993 struct drm_display_mode *downclock_mode = NULL; 7994 7995 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 7996 mutex_init(&dev_priv->drrs.mutex); 7997 7998 if (INTEL_GEN(dev_priv) <= 6) { 7999 drm_dbg_kms(&dev_priv->drm, 8000 "DRRS is only supported for Gen7 and above\n"); 8001 return NULL; 8002 } 8003 8004 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 8005 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); 8006 return NULL; 8007 } 8008 8009 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); 8010 if (!downclock_mode) { 8011 drm_dbg_kms(&dev_priv->drm, 8012 "Downclock mode not found. DRRS not supported\n"); 8013 return NULL; 8014 } 8015 8016 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 8017 8018 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 8019 drm_dbg_kms(&dev_priv->drm, 8020 "seamless DRRS supported for eDP panel.\n"); 8021 return downclock_mode; 8022 } 8023 8024 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 8025 struct intel_connector *intel_connector) 8026 { 8027 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8028 struct drm_device *dev = &dev_priv->drm; 8029 struct drm_connector *connector = &intel_connector->base; 8030 struct drm_display_mode *fixed_mode = NULL; 8031 struct drm_display_mode *downclock_mode = NULL; 8032 bool has_dpcd; 8033 enum pipe pipe = INVALID_PIPE; 8034 intel_wakeref_t wakeref; 8035 struct edid *edid; 8036 8037 if (!intel_dp_is_edp(intel_dp)) 8038 return true; 8039 8040 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); 8041 8042 /* 8043 * On IBX/CPT we may get here with LVDS already registered. Since the 8044 * driver uses the only internal power sequencer available for both 8045 * eDP and LVDS, bail out early in this case to prevent interfering 8046 * with an already powered-on LVDS power sequencer. 8047 */ 8048 if (intel_get_lvds_encoder(dev_priv)) { 8049 drm_WARN_ON(dev, 8050 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 8051 drm_info(&dev_priv->drm, 8052 "LVDS was detected, not registering eDP\n"); 8053 8054 return false; 8055 } 8056 8057 with_pps_lock(intel_dp, wakeref) { 8058 intel_dp_init_panel_power_timestamps(intel_dp); 8059 intel_dp_pps_init(intel_dp); 8060 intel_edp_panel_vdd_sanitize(intel_dp); 8061 } 8062 8063 /* Cache the DPCD and EDID for eDP.
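	 * The panel is always connected, so reading them once at init time
	 * is enough.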
*/ 8064 has_dpcd = intel_edp_init_dpcd(intel_dp); 8065 8066 if (!has_dpcd) { 8067 /* if this fails, presume the device is a ghost */ 8068 drm_info(&dev_priv->drm, 8069 "failed to retrieve link info, disabling eDP\n"); 8070 goto out_vdd_off; 8071 } 8072 8073 mutex_lock(&dev->mode_config.mutex); 8074 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 8075 if (edid) { 8076 if (drm_add_edid_modes(connector, edid)) { 8077 drm_connector_update_edid_property(connector, edid); 8078 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 8079 } else { 8080 kfree(edid); 8081 edid = ERR_PTR(-EINVAL); 8082 } 8083 } else { 8084 edid = ERR_PTR(-ENOENT); 8085 } 8086 intel_connector->edid = edid; 8087 8088 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 8089 if (fixed_mode) 8090 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 8091 8092 /* fall back to VBT if available for eDP */ 8093 if (!fixed_mode) 8094 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 8095 mutex_unlock(&dev->mode_config.mutex); 8096 8097 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 8098 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 8099 register_reboot_notifier(&intel_dp->edp_notifier); 8100 8101 /* 8102 * Figure out the current pipe for the initial backlight setup. 8103 * If the current pipe isn't valid, try the PPS pipe, and if that 8104 * fails just assume pipe A. 8105 */ 8106 pipe = vlv_active_pipe(intel_dp); 8107 8108 if (pipe != PIPE_A && pipe != PIPE_B) 8109 pipe = intel_dp->pps_pipe; 8110 8111 if (pipe != PIPE_A && pipe != PIPE_B) 8112 pipe = PIPE_A; 8113 8114 drm_dbg_kms(&dev_priv->drm, 8115 "using pipe %c for initial backlight setup\n", 8116 pipe_name(pipe)); 8117 } 8118 8119 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 8120 intel_connector->panel.backlight.power = intel_edp_backlight_power; 8121 intel_panel_setup_backlight(connector, pipe); 8122 8123 if (fixed_mode) { 8124 drm_connector_set_panel_orientation_with_quirk(connector, 8125 dev_priv->vbt.orientation, 8126 fixed_mode->hdisplay, fixed_mode->vdisplay); 8127 } 8128 8129 return true; 8130 8131 out_vdd_off: 8132 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 8133 /* 8134 * vdd might still be enabled due to the delayed vdd off. 8135 * Make sure vdd is actually turned off here. 8136 */ 8137 with_pps_lock(intel_dp, wakeref) 8138 edp_panel_vdd_off_sync(intel_dp); 8139 8140 return false; 8141 } 8142 8143 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 8144 { 8145 struct intel_connector *intel_connector; 8146 struct drm_connector *connector; 8147 8148 intel_connector = container_of(work, typeof(*intel_connector), 8149 modeset_retry_work); 8150 connector = &intel_connector->base; 8151 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 8152 connector->name); 8153 8154 /* Grab the locks before changing connector property */ 8155 mutex_lock(&connector->dev->mode_config.mutex); 8156 /* Set connector link status to BAD and send a Uevent to notify 8157 * userspace to do a modeset.
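	 * Userspace is then expected to reprobe and modeset again, at which
	 * point the (possibly reduced) link parameters will be used.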
8158 */ 8159 drm_connector_set_link_status_property(connector, 8160 DRM_MODE_LINK_STATUS_BAD); 8161 mutex_unlock(&connector->dev->mode_config.mutex); 8162 /* Send Hotplug uevent so userspace can reprobe */ 8163 drm_kms_helper_hotplug_event(connector->dev); 8164 } 8165 8166 bool 8167 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 8168 struct intel_connector *intel_connector) 8169 { 8170 struct drm_connector *connector = &intel_connector->base; 8171 struct intel_dp *intel_dp = &intel_dig_port->dp; 8172 struct intel_encoder *intel_encoder = &intel_dig_port->base; 8173 struct drm_device *dev = intel_encoder->base.dev; 8174 struct drm_i915_private *dev_priv = to_i915(dev); 8175 enum port port = intel_encoder->port; 8176 enum phy phy = intel_port_to_phy(dev_priv, port); 8177 int type; 8178 8179 /* Initialize the work for modeset in case of link train failure */ 8180 INIT_WORK(&intel_connector->modeset_retry_work, 8181 intel_dp_modeset_retry_work_fn); 8182 8183 if (drm_WARN(dev, intel_dig_port->max_lanes < 1, 8184 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 8185 intel_dig_port->max_lanes, intel_encoder->base.base.id, 8186 intel_encoder->base.name)) 8187 return false; 8188 8189 intel_dp_set_source_rates(intel_dp); 8190 8191 intel_dp->reset_link_params = true; 8192 intel_dp->pps_pipe = INVALID_PIPE; 8193 intel_dp->active_pipe = INVALID_PIPE; 8194 8195 /* Preserve the current hw state. */ 8196 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 8197 intel_dp->attached_connector = intel_connector; 8198 8199 if (intel_dp_is_port_edp(dev_priv, port)) { 8200 /* 8201 * Currently we don't support eDP on TypeC ports, although in 8202 * theory it could work on TypeC legacy ports. 8203 */ 8204 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 8205 type = DRM_MODE_CONNECTOR_eDP; 8206 } else { 8207 type = DRM_MODE_CONNECTOR_DisplayPort; 8208 } 8209 8210 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 8211 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 8212 8213 /* 8214 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 8215 * for DP the encoder type can be set by the caller to 8216 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 8217 */ 8218 if (type == DRM_MODE_CONNECTOR_eDP) 8219 intel_encoder->type = INTEL_OUTPUT_EDP; 8220 8221 /* eDP only on port B and/or C on vlv/chv */ 8222 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 8223 IS_CHERRYVIEW(dev_priv)) && 8224 intel_dp_is_edp(intel_dp) && 8225 port != PORT_B && port != PORT_C)) 8226 return false; 8227 8228 drm_dbg_kms(&dev_priv->drm, 8229 "Adding %s connector on [ENCODER:%d:%s]\n", 8230 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 8231 intel_encoder->base.base.id, intel_encoder->base.name); 8232 8233 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 8234 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 8235 8236 if (!HAS_GMCH(dev_priv)) 8237 connector->interlace_allowed = true; 8238 connector->doublescan_allowed = 0; 8239 8240 if (INTEL_GEN(dev_priv) >= 11) 8241 connector->ycbcr_420_allowed = true; 8242 8243 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 8244 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 8245 8246 intel_dp_aux_init(intel_dp); 8247 8248 intel_connector_attach_encoder(intel_connector, intel_encoder); 8249 8250 if (HAS_DDI(dev_priv)) 8251 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 8252 else 8253 intel_connector->get_hw_state = intel_connector_get_hw_state; 8254 8255 /* init MST on ports that can support it */ 8256 intel_dp_mst_encoder_init(intel_dig_port, 8257 intel_connector->base.base.id); 8258 8259 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 8260 intel_dp_aux_fini(intel_dp); 8261 intel_dp_mst_encoder_cleanup(intel_dig_port); 8262 goto fail; 8263 } 8264 8265 intel_dp_add_properties(intel_dp, connector); 8266 8267 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 8268 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); 8269 if (ret) 8270 drm_dbg_kms(&dev_priv->drm, 8271 "HDCP init failed, skipping.\n"); 8272 } 8273 8274 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 8275 * 0xd. Failure to do so will result in spurious interrupts being 8276 * generated on the port when a cable is not attached. 8277 */ 8278 if (IS_G45(dev_priv)) { 8279 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 8280 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 8281 (temp & ~0xf) | 0xd); 8282 } 8283 8284 return true; 8285 8286 fail: 8287 drm_connector_cleanup(connector); 8288 8289 return false; 8290 } 8291 8292 bool intel_dp_init(struct drm_i915_private *dev_priv, 8293 i915_reg_t output_reg, 8294 enum port port) 8295 { 8296 struct intel_digital_port *intel_dig_port; 8297 struct intel_encoder *intel_encoder; 8298 struct drm_encoder *encoder; 8299 struct intel_connector *intel_connector; 8300 8301 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 8302 if (!intel_dig_port) 8303 return false; 8304 8305 intel_connector = intel_connector_alloc(); 8306 if (!intel_connector) 8307 goto err_connector_alloc; 8308 8309 intel_encoder = &intel_dig_port->base; 8310 encoder = &intel_encoder->base; 8311 8312 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 8313 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 8314 "DP %c", port_name(port))) 8315 goto err_encoder_init; 8316 8317 intel_encoder->hotplug = intel_dp_hotplug; 8318 intel_encoder->compute_config = intel_dp_compute_config; 8319 intel_encoder->get_hw_state = intel_dp_get_hw_state; 8320 intel_encoder->get_config = intel_dp_get_config; 8321 intel_encoder->update_pipe = intel_panel_update_backlight; 8322 intel_encoder->suspend = intel_dp_encoder_suspend; 8323 if (IS_CHERRYVIEW(dev_priv)) { 8324 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 8325 intel_encoder->pre_enable = chv_pre_enable_dp; 8326 intel_encoder->enable = vlv_enable_dp; 8327 intel_encoder->disable = vlv_disable_dp; 8328 intel_encoder->post_disable = chv_post_disable_dp; 8329 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 8330 } else if (IS_VALLEYVIEW(dev_priv)) { 8331 intel_encoder->pre_pll_enable = 
vlv_dp_pre_pll_enable; 8332 intel_encoder->pre_enable = vlv_pre_enable_dp; 8333 intel_encoder->enable = vlv_enable_dp; 8334 intel_encoder->disable = vlv_disable_dp; 8335 intel_encoder->post_disable = vlv_post_disable_dp; 8336 } else { 8337 intel_encoder->pre_enable = g4x_pre_enable_dp; 8338 intel_encoder->enable = g4x_enable_dp; 8339 intel_encoder->disable = g4x_disable_dp; 8340 intel_encoder->post_disable = g4x_post_disable_dp; 8341 } 8342 8343 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 8344 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) 8345 intel_dig_port->dp.set_link_train = cpt_set_link_train; 8346 else 8347 intel_dig_port->dp.set_link_train = g4x_set_link_train; 8348 8349 if (IS_CHERRYVIEW(dev_priv)) 8350 intel_dig_port->dp.set_signal_levels = chv_set_signal_levels; 8351 else if (IS_VALLEYVIEW(dev_priv)) 8352 intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels; 8353 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 8354 intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels; 8355 else if (IS_GEN(dev_priv, 6) && port == PORT_A) 8356 intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; 8357 else 8358 intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels; 8359 8360 intel_dig_port->dp.output_reg = output_reg; 8361 intel_dig_port->max_lanes = 4; 8362 intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); 8363 intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); 8364 8365 intel_encoder->type = INTEL_OUTPUT_DP; 8366 intel_encoder->power_domain = intel_port_to_power_domain(port); 8367 if (IS_CHERRYVIEW(dev_priv)) { 8368 if (port == PORT_D) 8369 intel_encoder->pipe_mask = BIT(PIPE_C); 8370 else 8371 intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); 8372 } else { 8373 intel_encoder->pipe_mask = ~0; 8374 } 8375 intel_encoder->cloneable = 0; 8376 intel_encoder->port = port; 8377 8378 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 8379 8380 if (HAS_GMCH(dev_priv)) { 8381 if (IS_GM45(dev_priv)) 8382 intel_dig_port->connected = gm45_digital_port_connected; 8383 else 8384 intel_dig_port->connected = g4x_digital_port_connected; 8385 } else { 8386 if (port == PORT_A) 8387 intel_dig_port->connected = ilk_digital_port_connected; 8388 else 8389 intel_dig_port->connected = ibx_digital_port_connected; 8390 } 8391 8392 if (port != PORT_A) 8393 intel_infoframe_init(intel_dig_port); 8394 8395 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 8396 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 8397 goto err_init_connector; 8398 8399 return true; 8400 8401 err_init_connector: 8402 drm_encoder_cleanup(encoder); 8403 err_encoder_init: 8404 kfree(intel_connector); 8405 err_connector_alloc: 8406 kfree(intel_dig_port); 8407 return false; 8408 } 8409 8410 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 8411 { 8412 struct intel_encoder *encoder; 8413 8414 for_each_intel_encoder(&dev_priv->drm, encoder) { 8415 struct intel_dp *intel_dp; 8416 8417 if (encoder->type != INTEL_OUTPUT_DDI) 8418 continue; 8419 8420 intel_dp = enc_to_intel_dp(encoder); 8421 8422 if (!intel_dp->can_mst) 8423 continue; 8424 8425 if (intel_dp->is_mst) 8426 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 8427 } 8428 } 8429 8430 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 8431 { 8432 struct intel_encoder *encoder; 8433 8434 for_each_intel_encoder(&dev_priv->drm, encoder) { 8435 struct intel_dp *intel_dp; 8436 int ret; 8437 8438 if (encoder->type != INTEL_OUTPUT_DDI) 8439 continue; 8440 8441 intel_dp = 
enc_to_intel_dp(encoder); 8442 8443 if (!intel_dp->can_mst) 8444 continue; 8445 8446 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 8447 true); 8448 if (ret) { 8449 intel_dp->is_mst = false; 8450 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 8451 false); 8452 } 8453 } 8454 } 8455
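/*
 * A minimal usage sketch for intel_dp_init() (hypothetical call site; the
 * real one lives in the display setup code), registering a DP encoder on
 * port B:
 *
 *	if (!intel_dp_init(dev_priv, DP_B, PORT_B))
 *		drm_dbg_kms(&dev_priv->drm, "DP init failed on port B\n");
 *
 * intel_dp_init() frees everything it allocated on failure, so the caller
 * only needs to check the return value.
 */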