1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/reboot.h> 32 #include <linux/slab.h> 33 #include <linux/types.h> 34 35 #include <asm/byteorder.h> 36 37 #include <drm/drm_atomic_helper.h> 38 #include <drm/drm_crtc.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_edid.h> 41 #include <drm/drm_hdcp.h> 42 #include <drm/drm_probe_helper.h> 43 44 #include "i915_debugfs.h" 45 #include "i915_drv.h" 46 #include "i915_trace.h" 47 #include "intel_atomic.h" 48 #include "intel_audio.h" 49 #include "intel_connector.h" 50 #include "intel_ddi.h" 51 #include "intel_display_types.h" 52 #include "intel_dp.h" 53 #include "intel_dp_link_training.h" 54 #include "intel_dp_mst.h" 55 #include "intel_dpio_phy.h" 56 #include "intel_fifo_underrun.h" 57 #include "intel_hdcp.h" 58 #include "intel_hdmi.h" 59 #include "intel_hotplug.h" 60 #include "intel_lspcon.h" 61 #include "intel_lvds.h" 62 #include "intel_panel.h" 63 #include "intel_psr.h" 64 #include "intel_sideband.h" 65 #include "intel_tc.h" 66 #include "intel_vdsc.h" 67 68 #define DP_DPRX_ESI_LEN 14 69 70 /* DP DSC throughput values used for slice count calculations KPixels/s */ 71 #define DP_DSC_PEAK_PIXEL_RATE 2720000 72 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 73 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 74 75 /* DP DSC FEC Overhead factor = 1/(0.972261) */ 76 #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 77 78 /* Compliance test status bits */ 79 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 80 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 81 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 82 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 83 84 struct dp_link_dpll { 85 int clock; 86 struct dpll dpll; 87 }; 88 89 static const struct dp_link_dpll g4x_dpll[] = { 90 { 162000, 91 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, 92 { 270000, 93 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } 94 }; 95 96 static const struct dp_link_dpll pch_dpll[] = { 97 { 162000, 98 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, 99 { 270000, 100 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } 101 }; 102 103 static const struct dp_link_dpll vlv_dpll[] = { 104 { 162000, 105 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, 106 { 270000, 107 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 108 }; 109 110 /* 111 * CHV supports eDP 1.4 that have more link rates. 112 * Below only provides the fixed rate but exclude variable rate. 113 */ 114 static const struct dp_link_dpll chv_dpll[] = { 115 /* 116 * CHV requires to program fractional division for m2. 117 * m2 is stored in fixed point format using formula below 118 * (m2_int << 22) | m2_fraction 119 */ 120 { 162000, /* m2_int = 32, m2_fraction = 1677722 */ 121 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, 122 { 270000, /* m2_int = 27, m2_fraction = 0 */ 123 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, 124 }; 125 126 /* Constants for DP DSC configurations */ 127 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; 128 129 /* With Single pipe configuration, HW is capable of supporting maximum 130 * of 4 slices per line. 131 */ 132 static const u8 valid_dsc_slicecount[] = {1, 2, 4}; 133 134 /** 135 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 136 * @intel_dp: DP struct 137 * 138 * If a CPU or PCH DP output is attached to an eDP panel, this function 139 * will return true, and false otherwise. 140 */ 141 bool intel_dp_is_edp(struct intel_dp *intel_dp) 142 { 143 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 144 145 return intel_dig_port->base.type == INTEL_OUTPUT_EDP; 146 } 147 148 static void intel_dp_link_down(struct intel_encoder *encoder, 149 const struct intel_crtc_state *old_crtc_state); 150 static bool edp_panel_vdd_on(struct intel_dp *intel_dp); 151 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 152 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 153 const struct intel_crtc_state *crtc_state); 154 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 155 enum pipe pipe); 156 static void intel_dp_unset_edid(struct intel_dp *intel_dp); 157 158 /* update sink rates from dpcd */ 159 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 160 { 161 static const int dp_rates[] = { 162 162000, 270000, 540000, 810000 163 }; 164 int i, max_rate; 165 166 if (drm_dp_has_quirk(&intel_dp->desc, 0, 167 DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { 168 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ 169 static const int quirk_rates[] = { 162000, 270000, 324000 }; 170 171 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); 172 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); 173 174 return; 175 } 176 177 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 178 179 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { 180 if (dp_rates[i] > max_rate) 181 break; 182 intel_dp->sink_rates[i] = dp_rates[i]; 183 } 184 185 intel_dp->num_sink_rates = i; 186 } 187 188 /* Get length of rates array potentially limited by max_rate. */ 189 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 190 { 191 int i; 192 193 /* Limit results by potentially reduced max rate */ 194 for (i = 0; i < len; i++) { 195 if (rates[len - i - 1] <= max_rate) 196 return len - i; 197 } 198 199 return 0; 200 } 201 202 /* Get length of common rates array potentially limited by max_rate. */ 203 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 204 int max_rate) 205 { 206 return intel_dp_rate_limit_len(intel_dp->common_rates, 207 intel_dp->num_common_rates, max_rate); 208 } 209 210 /* Theoretical max between source and sink */ 211 static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 212 { 213 return intel_dp->common_rates[intel_dp->num_common_rates - 1]; 214 } 215 216 /* Theoretical max between source and sink */ 217 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 218 { 219 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 220 int source_max = intel_dig_port->max_lanes; 221 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 222 int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port); 223 224 return min3(source_max, sink_max, fia_max); 225 } 226 227 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 228 { 229 return intel_dp->max_link_lane_count; 230 } 231 232 int 233 intel_dp_link_required(int pixel_clock, int bpp) 234 { 235 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 236 return DIV_ROUND_UP(pixel_clock * bpp, 8); 237 } 238 239 int 240 intel_dp_max_data_rate(int max_link_clock, int max_lanes) 241 { 242 /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the 243 * link rate that is generally expressed in Gbps. Since, 8 bits of data 244 * is transmitted every LS_Clk per lane, there is no need to account for 245 * the channel encoding that is done in the PHY layer here. 246 */ 247 248 return max_link_clock * max_lanes; 249 } 250 251 static int 252 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp) 253 { 254 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 255 struct intel_encoder *encoder = &intel_dig_port->base; 256 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 257 int max_dotclk = dev_priv->max_dotclk_freq; 258 int ds_max_dotclk; 259 260 int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 261 262 if (type != DP_DS_PORT_TYPE_VGA) 263 return max_dotclk; 264 265 ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd, 266 intel_dp->downstream_ports); 267 268 if (ds_max_dotclk != 0) 269 max_dotclk = min(max_dotclk, ds_max_dotclk); 270 271 return max_dotclk; 272 } 273 274 static int cnl_max_source_rate(struct intel_dp *intel_dp) 275 { 276 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 277 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 278 enum port port = dig_port->base.port; 279 280 u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; 281 282 /* Low voltage SKUs are limited to max of 5.4G */ 283 if (voltage == VOLTAGE_INFO_0_85V) 284 return 540000; 285 286 /* For this SKU 8.1G is supported in all ports */ 287 if (IS_CNL_WITH_PORT_F(dev_priv)) 288 return 810000; 289 290 /* For other SKUs, max rate on ports A and D is 5.4G */ 291 if (port == PORT_A || port == PORT_D) 292 return 540000; 293 294 return 810000; 295 } 296 297 static int icl_max_source_rate(struct intel_dp *intel_dp) 298 { 299 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 300 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 301 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 302 303 if (intel_phy_is_combo(dev_priv, phy) && 304 !IS_ELKHARTLAKE(dev_priv) && 305 !intel_dp_is_edp(intel_dp)) 306 return 540000; 307 308 return 810000; 309 } 310 311 static void 312 intel_dp_set_source_rates(struct intel_dp *intel_dp) 313 { 314 /* The values must be in increasing order */ 315 static const int cnl_rates[] = { 316 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000 317 }; 318 static const int bxt_rates[] = { 319 162000, 216000, 243000, 270000, 324000, 432000, 540000 320 }; 321 static const int skl_rates[] = { 322 162000, 216000, 270000, 324000, 432000, 540000 323 }; 324 static const int hsw_rates[] = { 325 162000, 270000, 540000 326 }; 327 static const int g4x_rates[] = { 328 162000, 270000 329 }; 330 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 331 struct intel_encoder *encoder = &dig_port->base; 332 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 333 const int *source_rates; 334 int size, max_rate = 0, vbt_max_rate; 335 336 /* This should only be done once */ 337 drm_WARN_ON(&dev_priv->drm, 338 intel_dp->source_rates || intel_dp->num_source_rates); 339 340 if (INTEL_GEN(dev_priv) >= 10) { 341 source_rates = cnl_rates; 342 size = ARRAY_SIZE(cnl_rates); 343 if (IS_GEN(dev_priv, 10)) 344 max_rate = cnl_max_source_rate(intel_dp); 345 else 346 max_rate = icl_max_source_rate(intel_dp); 347 } else if (IS_GEN9_LP(dev_priv)) { 348 source_rates = bxt_rates; 349 size = ARRAY_SIZE(bxt_rates); 350 } else if (IS_GEN9_BC(dev_priv)) { 351 source_rates = skl_rates; 352 size = ARRAY_SIZE(skl_rates); 353 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 354 IS_BROADWELL(dev_priv)) { 355 source_rates = hsw_rates; 356 size = ARRAY_SIZE(hsw_rates); 357 } else { 358 source_rates = g4x_rates; 359 size = ARRAY_SIZE(g4x_rates); 360 } 361 362 vbt_max_rate = intel_bios_dp_max_link_rate(encoder); 363 if (max_rate && vbt_max_rate) 364 max_rate = min(max_rate, vbt_max_rate); 365 else if (vbt_max_rate) 366 max_rate = vbt_max_rate; 367 368 if (max_rate) 369 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 370 371 intel_dp->source_rates = source_rates; 372 intel_dp->num_source_rates = size; 373 } 374 375 static int intersect_rates(const int *source_rates, int source_len, 376 const int *sink_rates, int sink_len, 377 int *common_rates) 378 { 379 int i = 0, j = 0, k = 0; 380 381 while (i < source_len && j < sink_len) { 382 if (source_rates[i] == sink_rates[j]) { 383 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 384 return k; 385 common_rates[k] = source_rates[i]; 386 ++k; 387 ++i; 388 ++j; 389 } else if (source_rates[i] < sink_rates[j]) { 390 ++i; 391 } else { 392 ++j; 393 } 394 } 395 return k; 396 } 397 398 /* return index of rate in rates array, or -1 if not found */ 399 static int intel_dp_rate_index(const int *rates, int len, int rate) 400 { 401 int i; 402 403 for (i = 0; i < len; i++) 404 if (rate == rates[i]) 405 return i; 406 407 return -1; 408 } 409 410 static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 411 { 412 WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates); 413 414 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 415 intel_dp->num_source_rates, 416 intel_dp->sink_rates, 417 intel_dp->num_sink_rates, 418 intel_dp->common_rates); 419 420 /* Paranoia, there should always be something in common. */ 421 if (WARN_ON(intel_dp->num_common_rates == 0)) { 422 intel_dp->common_rates[0] = 162000; 423 intel_dp->num_common_rates = 1; 424 } 425 } 426 427 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 428 u8 lane_count) 429 { 430 /* 431 * FIXME: we need to synchronize the current link parameters with 432 * hardware readout. Currently fast link training doesn't work on 433 * boot-up. 434 */ 435 if (link_rate == 0 || 436 link_rate > intel_dp->max_link_rate) 437 return false; 438 439 if (lane_count == 0 || 440 lane_count > intel_dp_max_lane_count(intel_dp)) 441 return false; 442 443 return true; 444 } 445 446 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, 447 int link_rate, 448 u8 lane_count) 449 { 450 const struct drm_display_mode *fixed_mode = 451 intel_dp->attached_connector->panel.fixed_mode; 452 int mode_rate, max_rate; 453 454 mode_rate = intel_dp_link_required(fixed_mode->clock, 18); 455 max_rate = intel_dp_max_data_rate(link_rate, lane_count); 456 if (mode_rate > max_rate) 457 return false; 458 459 return true; 460 } 461 462 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, 463 int link_rate, u8 lane_count) 464 { 465 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 466 int index; 467 468 index = intel_dp_rate_index(intel_dp->common_rates, 469 intel_dp->num_common_rates, 470 link_rate); 471 if (index > 0) { 472 if (intel_dp_is_edp(intel_dp) && 473 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 474 intel_dp->common_rates[index - 1], 475 lane_count)) { 476 drm_dbg_kms(&i915->drm, 477 "Retrying Link training for eDP with same parameters\n"); 478 return 0; 479 } 480 intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; 481 intel_dp->max_link_lane_count = lane_count; 482 } else if (lane_count > 1) { 483 if (intel_dp_is_edp(intel_dp) && 484 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 485 intel_dp_max_common_rate(intel_dp), 486 lane_count >> 1)) { 487 drm_dbg_kms(&i915->drm, 488 "Retrying Link training for eDP with same parameters\n"); 489 return 0; 490 } 491 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 492 intel_dp->max_link_lane_count = lane_count >> 1; 493 } else { 494 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 495 return -1; 496 } 497 498 return 0; 499 } 500 501 u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 502 { 503 return div_u64(mul_u32_u32(mode_clock, 1000000U), 504 DP_DSC_FEC_OVERHEAD_FACTOR); 505 } 506 507 static int 508 small_joiner_ram_size_bits(struct drm_i915_private *i915) 509 { 510 if (INTEL_GEN(i915) >= 11) 511 return 7680 * 8; 512 else 513 return 6144 * 8; 514 } 515 516 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, 517 u32 link_clock, u32 lane_count, 518 u32 mode_clock, u32 mode_hdisplay) 519 { 520 u32 bits_per_pixel, max_bpp_small_joiner_ram; 521 int i; 522 523 /* 524 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 525 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP) 526 * for SST -> TimeSlotsPerMTP is 1, 527 * for MST -> TimeSlotsPerMTP has to be calculated 528 */ 529 bits_per_pixel = (link_clock * lane_count * 8) / 530 intel_dp_mode_to_fec_clock(mode_clock); 531 drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel); 532 533 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ 534 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / 535 mode_hdisplay; 536 drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n", 537 max_bpp_small_joiner_ram); 538 539 /* 540 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW 541 * check, output bpp from small joiner RAM check) 542 */ 543 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); 544 545 /* Error out if the max bpp is less than smallest allowed valid bpp */ 546 if (bits_per_pixel < valid_dsc_bpp[0]) { 547 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", 548 bits_per_pixel, valid_dsc_bpp[0]); 549 return 0; 550 } 551 552 /* Find the nearest match in the array of known BPPs from VESA */ 553 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 554 if (bits_per_pixel < valid_dsc_bpp[i + 1]) 555 break; 556 } 557 bits_per_pixel = valid_dsc_bpp[i]; 558 559 /* 560 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, 561 * fractional part is 0 562 */ 563 return bits_per_pixel << 4; 564 } 565 566 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, 567 int mode_clock, int mode_hdisplay) 568 { 569 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 570 u8 min_slice_count, i; 571 int max_slice_width; 572 573 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 574 min_slice_count = DIV_ROUND_UP(mode_clock, 575 DP_DSC_MAX_ENC_THROUGHPUT_0); 576 else 577 min_slice_count = DIV_ROUND_UP(mode_clock, 578 DP_DSC_MAX_ENC_THROUGHPUT_1); 579 580 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); 581 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 582 drm_dbg_kms(&i915->drm, 583 "Unsupported slice width %d by DP DSC Sink device\n", 584 max_slice_width); 585 return 0; 586 } 587 /* Also take into account max slice width */ 588 min_slice_count = min_t(u8, min_slice_count, 589 DIV_ROUND_UP(mode_hdisplay, 590 max_slice_width)); 591 592 /* Find the closest match to the valid slice count values */ 593 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { 594 if (valid_dsc_slicecount[i] > 595 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 596 false)) 597 break; 598 if (min_slice_count <= valid_dsc_slicecount[i]) 599 return valid_dsc_slicecount[i]; 600 } 601 602 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", 603 min_slice_count); 604 return 0; 605 } 606 607 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, 608 int hdisplay) 609 { 610 /* 611 * Older platforms don't like hdisplay==4096 with DP. 612 * 613 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline 614 * and frame counter increment), but we don't get vblank interrupts, 615 * and the pipe underruns immediately. The link also doesn't seem 616 * to get trained properly. 617 * 618 * On CHV the vblank interrupts don't seem to disappear but 619 * otherwise the symptoms are similar. 620 * 621 * TODO: confirm the behaviour on HSW+ 622 */ 623 return hdisplay == 4096 && !HAS_DDI(dev_priv); 624 } 625 626 static enum drm_mode_status 627 intel_dp_mode_valid(struct drm_connector *connector, 628 struct drm_display_mode *mode) 629 { 630 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 631 struct intel_connector *intel_connector = to_intel_connector(connector); 632 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 633 struct drm_i915_private *dev_priv = to_i915(connector->dev); 634 int target_clock = mode->clock; 635 int max_rate, mode_rate, max_lanes, max_link_clock; 636 int max_dotclk; 637 u16 dsc_max_output_bpp = 0; 638 u8 dsc_slice_count = 0; 639 640 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 641 return MODE_NO_DBLESCAN; 642 643 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); 644 645 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 646 if (mode->hdisplay > fixed_mode->hdisplay) 647 return MODE_PANEL; 648 649 if (mode->vdisplay > fixed_mode->vdisplay) 650 return MODE_PANEL; 651 652 target_clock = fixed_mode->clock; 653 } 654 655 max_link_clock = intel_dp_max_link_rate(intel_dp); 656 max_lanes = intel_dp_max_lane_count(intel_dp); 657 658 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 659 mode_rate = intel_dp_link_required(target_clock, 18); 660 661 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) 662 return MODE_H_ILLEGAL; 663 664 /* 665 * Output bpp is stored in 6.4 format so right shift by 4 to get the 666 * integer value since we support only integer values of bpp. 667 */ 668 if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) && 669 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { 670 if (intel_dp_is_edp(intel_dp)) { 671 dsc_max_output_bpp = 672 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; 673 dsc_slice_count = 674 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 675 true); 676 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { 677 dsc_max_output_bpp = 678 intel_dp_dsc_get_output_bpp(dev_priv, 679 max_link_clock, 680 max_lanes, 681 target_clock, 682 mode->hdisplay) >> 4; 683 dsc_slice_count = 684 intel_dp_dsc_get_slice_count(intel_dp, 685 target_clock, 686 mode->hdisplay); 687 } 688 } 689 690 if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) || 691 target_clock > max_dotclk) 692 return MODE_CLOCK_HIGH; 693 694 if (mode->clock < 10000) 695 return MODE_CLOCK_LOW; 696 697 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 698 return MODE_H_ILLEGAL; 699 700 return intel_mode_valid_max_plane_size(dev_priv, mode); 701 } 702 703 u32 intel_dp_pack_aux(const u8 *src, int src_bytes) 704 { 705 int i; 706 u32 v = 0; 707 708 if (src_bytes > 4) 709 src_bytes = 4; 710 for (i = 0; i < src_bytes; i++) 711 v |= ((u32)src[i]) << ((3 - i) * 8); 712 return v; 713 } 714 715 static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes) 716 { 717 int i; 718 if (dst_bytes > 4) 719 dst_bytes = 4; 720 for (i = 0; i < dst_bytes; i++) 721 dst[i] = src >> ((3-i) * 8); 722 } 723 724 static void 725 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp); 726 static void 727 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 728 bool force_disable_vdd); 729 static void 730 intel_dp_pps_init(struct intel_dp *intel_dp); 731 732 static intel_wakeref_t 733 pps_lock(struct intel_dp *intel_dp) 734 { 735 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 736 intel_wakeref_t wakeref; 737 738 /* 739 * See intel_power_sequencer_reset() why we need 740 * a power domain reference here. 741 */ 742 wakeref = intel_display_power_get(dev_priv, 743 intel_aux_power_domain(dp_to_dig_port(intel_dp))); 744 745 mutex_lock(&dev_priv->pps_mutex); 746 747 return wakeref; 748 } 749 750 static intel_wakeref_t 751 pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) 752 { 753 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 754 755 mutex_unlock(&dev_priv->pps_mutex); 756 intel_display_power_put(dev_priv, 757 intel_aux_power_domain(dp_to_dig_port(intel_dp)), 758 wakeref); 759 return 0; 760 } 761 762 #define with_pps_lock(dp, wf) \ 763 for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf))) 764 765 static void 766 vlv_power_sequencer_kick(struct intel_dp *intel_dp) 767 { 768 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 769 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 770 enum pipe pipe = intel_dp->pps_pipe; 771 bool pll_enabled, release_cl_override = false; 772 enum dpio_phy phy = DPIO_PHY(pipe); 773 enum dpio_channel ch = vlv_pipe_to_channel(pipe); 774 u32 DP; 775 776 if (drm_WARN(&dev_priv->drm, 777 intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN, 778 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n", 779 pipe_name(pipe), intel_dig_port->base.base.base.id, 780 intel_dig_port->base.base.name)) 781 return; 782 783 drm_dbg_kms(&dev_priv->drm, 784 "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n", 785 pipe_name(pipe), intel_dig_port->base.base.base.id, 786 intel_dig_port->base.base.name); 787 788 /* Preserve the BIOS-computed detected bit. This is 789 * supposed to be read-only. 790 */ 791 DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 792 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 793 DP |= DP_PORT_WIDTH(1); 794 DP |= DP_LINK_TRAIN_PAT_1; 795 796 if (IS_CHERRYVIEW(dev_priv)) 797 DP |= DP_PIPE_SEL_CHV(pipe); 798 else 799 DP |= DP_PIPE_SEL(pipe); 800 801 pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE; 802 803 /* 804 * The DPLL for the pipe must be enabled for this to work. 805 * So enable temporarily it if it's not already enabled. 806 */ 807 if (!pll_enabled) { 808 release_cl_override = IS_CHERRYVIEW(dev_priv) && 809 !chv_phy_powergate_ch(dev_priv, phy, ch, true); 810 811 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ? 812 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) { 813 drm_err(&dev_priv->drm, 814 "Failed to force on pll for pipe %c!\n", 815 pipe_name(pipe)); 816 return; 817 } 818 } 819 820 /* 821 * Similar magic as in intel_dp_enable_port(). 822 * We _must_ do this port enable + disable trick 823 * to make this power sequencer lock onto the port. 824 * Otherwise even VDD force bit won't work. 825 */ 826 intel_de_write(dev_priv, intel_dp->output_reg, DP); 827 intel_de_posting_read(dev_priv, intel_dp->output_reg); 828 829 intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN); 830 intel_de_posting_read(dev_priv, intel_dp->output_reg); 831 832 intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN); 833 intel_de_posting_read(dev_priv, intel_dp->output_reg); 834 835 if (!pll_enabled) { 836 vlv_force_pll_off(dev_priv, pipe); 837 838 if (release_cl_override) 839 chv_phy_powergate_ch(dev_priv, phy, ch, false); 840 } 841 } 842 843 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) 844 { 845 struct intel_encoder *encoder; 846 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); 847 848 /* 849 * We don't have power sequencer currently. 850 * Pick one that's not used by other ports. 851 */ 852 for_each_intel_dp(&dev_priv->drm, encoder) { 853 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 854 855 if (encoder->type == INTEL_OUTPUT_EDP) { 856 drm_WARN_ON(&dev_priv->drm, 857 intel_dp->active_pipe != INVALID_PIPE && 858 intel_dp->active_pipe != 859 intel_dp->pps_pipe); 860 861 if (intel_dp->pps_pipe != INVALID_PIPE) 862 pipes &= ~(1 << intel_dp->pps_pipe); 863 } else { 864 drm_WARN_ON(&dev_priv->drm, 865 intel_dp->pps_pipe != INVALID_PIPE); 866 867 if (intel_dp->active_pipe != INVALID_PIPE) 868 pipes &= ~(1 << intel_dp->active_pipe); 869 } 870 } 871 872 if (pipes == 0) 873 return INVALID_PIPE; 874 875 return ffs(pipes) - 1; 876 } 877 878 static enum pipe 879 vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 880 { 881 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 882 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 883 enum pipe pipe; 884 885 lockdep_assert_held(&dev_priv->pps_mutex); 886 887 /* We should never land here with regular DP ports */ 888 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); 889 890 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE && 891 intel_dp->active_pipe != intel_dp->pps_pipe); 892 893 if (intel_dp->pps_pipe != INVALID_PIPE) 894 return intel_dp->pps_pipe; 895 896 pipe = vlv_find_free_pps(dev_priv); 897 898 /* 899 * Didn't find one. This should not happen since there 900 * are two power sequencers and up to two eDP ports. 901 */ 902 if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE)) 903 pipe = PIPE_A; 904 905 vlv_steal_power_sequencer(dev_priv, pipe); 906 intel_dp->pps_pipe = pipe; 907 908 drm_dbg_kms(&dev_priv->drm, 909 "picked pipe %c power sequencer for [ENCODER:%d:%s]\n", 910 pipe_name(intel_dp->pps_pipe), 911 intel_dig_port->base.base.base.id, 912 intel_dig_port->base.base.name); 913 914 /* init power sequencer on this pipe and port */ 915 intel_dp_init_panel_power_sequencer(intel_dp); 916 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 917 918 /* 919 * Even vdd force doesn't work until we've made 920 * the power sequencer lock in on the port. 921 */ 922 vlv_power_sequencer_kick(intel_dp); 923 924 return intel_dp->pps_pipe; 925 } 926 927 static int 928 bxt_power_sequencer_idx(struct intel_dp *intel_dp) 929 { 930 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 931 int backlight_controller = dev_priv->vbt.backlight.controller; 932 933 lockdep_assert_held(&dev_priv->pps_mutex); 934 935 /* We should never land here with regular DP ports */ 936 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); 937 938 if (!intel_dp->pps_reset) 939 return backlight_controller; 940 941 intel_dp->pps_reset = false; 942 943 /* 944 * Only the HW needs to be reprogrammed, the SW state is fixed and 945 * has been setup during connector init. 946 */ 947 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 948 949 return backlight_controller; 950 } 951 952 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, 953 enum pipe pipe); 954 955 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, 956 enum pipe pipe) 957 { 958 return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON; 959 } 960 961 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, 962 enum pipe pipe) 963 { 964 return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD; 965 } 966 967 static bool vlv_pipe_any(struct drm_i915_private *dev_priv, 968 enum pipe pipe) 969 { 970 return true; 971 } 972 973 static enum pipe 974 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, 975 enum port port, 976 vlv_pipe_check pipe_check) 977 { 978 enum pipe pipe; 979 980 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 981 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) & 982 PANEL_PORT_SELECT_MASK; 983 984 if (port_sel != PANEL_PORT_SELECT_VLV(port)) 985 continue; 986 987 if (!pipe_check(dev_priv, pipe)) 988 continue; 989 990 return pipe; 991 } 992 993 return INVALID_PIPE; 994 } 995 996 static void 997 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) 998 { 999 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1000 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1001 enum port port = intel_dig_port->base.port; 1002 1003 lockdep_assert_held(&dev_priv->pps_mutex); 1004 1005 /* try to find a pipe with this port selected */ 1006 /* first pick one where the panel is on */ 1007 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 1008 vlv_pipe_has_pp_on); 1009 /* didn't find one? pick one where vdd is on */ 1010 if (intel_dp->pps_pipe == INVALID_PIPE) 1011 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 1012 vlv_pipe_has_vdd_on); 1013 /* didn't find one? pick one with just the correct port */ 1014 if (intel_dp->pps_pipe == INVALID_PIPE) 1015 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, 1016 vlv_pipe_any); 1017 1018 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ 1019 if (intel_dp->pps_pipe == INVALID_PIPE) { 1020 drm_dbg_kms(&dev_priv->drm, 1021 "no initial power sequencer for [ENCODER:%d:%s]\n", 1022 intel_dig_port->base.base.base.id, 1023 intel_dig_port->base.base.name); 1024 return; 1025 } 1026 1027 drm_dbg_kms(&dev_priv->drm, 1028 "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n", 1029 intel_dig_port->base.base.base.id, 1030 intel_dig_port->base.base.name, 1031 pipe_name(intel_dp->pps_pipe)); 1032 1033 intel_dp_init_panel_power_sequencer(intel_dp); 1034 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 1035 } 1036 1037 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) 1038 { 1039 struct intel_encoder *encoder; 1040 1041 if (drm_WARN_ON(&dev_priv->drm, 1042 !(IS_VALLEYVIEW(dev_priv) || 1043 IS_CHERRYVIEW(dev_priv) || 1044 IS_GEN9_LP(dev_priv)))) 1045 return; 1046 1047 /* 1048 * We can't grab pps_mutex here due to deadlock with power_domain 1049 * mutex when power_domain functions are called while holding pps_mutex. 1050 * That also means that in order to use pps_pipe the code needs to 1051 * hold both a power domain reference and pps_mutex, and the power domain 1052 * reference get/put must be done while _not_ holding pps_mutex. 1053 * pps_{lock,unlock}() do these steps in the correct order, so one 1054 * should use them always. 1055 */ 1056 1057 for_each_intel_dp(&dev_priv->drm, encoder) { 1058 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1059 1060 drm_WARN_ON(&dev_priv->drm, 1061 intel_dp->active_pipe != INVALID_PIPE); 1062 1063 if (encoder->type != INTEL_OUTPUT_EDP) 1064 continue; 1065 1066 if (IS_GEN9_LP(dev_priv)) 1067 intel_dp->pps_reset = true; 1068 else 1069 intel_dp->pps_pipe = INVALID_PIPE; 1070 } 1071 } 1072 1073 struct pps_registers { 1074 i915_reg_t pp_ctrl; 1075 i915_reg_t pp_stat; 1076 i915_reg_t pp_on; 1077 i915_reg_t pp_off; 1078 i915_reg_t pp_div; 1079 }; 1080 1081 static void intel_pps_get_registers(struct intel_dp *intel_dp, 1082 struct pps_registers *regs) 1083 { 1084 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1085 int pps_idx = 0; 1086 1087 memset(regs, 0, sizeof(*regs)); 1088 1089 if (IS_GEN9_LP(dev_priv)) 1090 pps_idx = bxt_power_sequencer_idx(intel_dp); 1091 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1092 pps_idx = vlv_power_sequencer_pipe(intel_dp); 1093 1094 regs->pp_ctrl = PP_CONTROL(pps_idx); 1095 regs->pp_stat = PP_STATUS(pps_idx); 1096 regs->pp_on = PP_ON_DELAYS(pps_idx); 1097 regs->pp_off = PP_OFF_DELAYS(pps_idx); 1098 1099 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ 1100 if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 1101 regs->pp_div = INVALID_MMIO_REG; 1102 else 1103 regs->pp_div = PP_DIVISOR(pps_idx); 1104 } 1105 1106 static i915_reg_t 1107 _pp_ctrl_reg(struct intel_dp *intel_dp) 1108 { 1109 struct pps_registers regs; 1110 1111 intel_pps_get_registers(intel_dp, ®s); 1112 1113 return regs.pp_ctrl; 1114 } 1115 1116 static i915_reg_t 1117 _pp_stat_reg(struct intel_dp *intel_dp) 1118 { 1119 struct pps_registers regs; 1120 1121 intel_pps_get_registers(intel_dp, ®s); 1122 1123 return regs.pp_stat; 1124 } 1125 1126 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing 1127 This function only applicable when panel PM state is not to be tracked */ 1128 static int edp_notify_handler(struct notifier_block *this, unsigned long code, 1129 void *unused) 1130 { 1131 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), 1132 edp_notifier); 1133 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1134 intel_wakeref_t wakeref; 1135 1136 if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) 1137 return 0; 1138 1139 with_pps_lock(intel_dp, wakeref) { 1140 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1141 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); 1142 i915_reg_t pp_ctrl_reg, pp_div_reg; 1143 u32 pp_div; 1144 1145 pp_ctrl_reg = PP_CONTROL(pipe); 1146 pp_div_reg = PP_DIVISOR(pipe); 1147 pp_div = intel_de_read(dev_priv, pp_div_reg); 1148 pp_div &= PP_REFERENCE_DIVIDER_MASK; 1149 1150 /* 0x1F write to PP_DIV_REG sets max cycle delay */ 1151 intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F); 1152 intel_de_write(dev_priv, pp_ctrl_reg, 1153 PANEL_UNLOCK_REGS); 1154 msleep(intel_dp->panel_power_cycle_delay); 1155 } 1156 } 1157 1158 return 0; 1159 } 1160 1161 static bool edp_have_panel_power(struct intel_dp *intel_dp) 1162 { 1163 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1164 1165 lockdep_assert_held(&dev_priv->pps_mutex); 1166 1167 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1168 intel_dp->pps_pipe == INVALID_PIPE) 1169 return false; 1170 1171 return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0; 1172 } 1173 1174 static bool edp_have_panel_vdd(struct intel_dp *intel_dp) 1175 { 1176 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1177 1178 lockdep_assert_held(&dev_priv->pps_mutex); 1179 1180 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1181 intel_dp->pps_pipe == INVALID_PIPE) 1182 return false; 1183 1184 return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; 1185 } 1186 1187 static void 1188 intel_dp_check_edp(struct intel_dp *intel_dp) 1189 { 1190 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1191 1192 if (!intel_dp_is_edp(intel_dp)) 1193 return; 1194 1195 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { 1196 drm_WARN(&dev_priv->drm, 1, 1197 "eDP powered off while attempting aux channel communication.\n"); 1198 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n", 1199 intel_de_read(dev_priv, _pp_stat_reg(intel_dp)), 1200 intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp))); 1201 } 1202 } 1203 1204 static u32 1205 intel_dp_aux_wait_done(struct intel_dp *intel_dp) 1206 { 1207 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1208 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); 1209 const unsigned int timeout_ms = 10; 1210 u32 status; 1211 bool done; 1212 1213 #define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 1214 done = wait_event_timeout(i915->gmbus_wait_queue, C, 1215 msecs_to_jiffies_timeout(timeout_ms)); 1216 1217 /* just trace the final value */ 1218 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); 1219 1220 if (!done) 1221 drm_err(&i915->drm, 1222 "%s: did not complete or timeout within %ums (status 0x%08x)\n", 1223 intel_dp->aux.name, timeout_ms, status); 1224 #undef C 1225 1226 return status; 1227 } 1228 1229 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1230 { 1231 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1232 1233 if (index) 1234 return 0; 1235 1236 /* 1237 * The clock divider is based off the hrawclk, and would like to run at 1238 * 2MHz. So, take the hrawclk value and divide by 2000 and use that 1239 */ 1240 return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000); 1241 } 1242 1243 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1244 { 1245 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1246 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1247 u32 freq; 1248 1249 if (index) 1250 return 0; 1251 1252 /* 1253 * The clock divider is based off the cdclk or PCH rawclk, and would 1254 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and 1255 * divide by 2000 and use that 1256 */ 1257 if (dig_port->aux_ch == AUX_CH_A) 1258 freq = dev_priv->cdclk.hw.cdclk; 1259 else 1260 freq = RUNTIME_INFO(dev_priv)->rawclk_freq; 1261 return DIV_ROUND_CLOSEST(freq, 2000); 1262 } 1263 1264 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1265 { 1266 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1267 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1268 1269 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { 1270 /* Workaround for non-ULT HSW */ 1271 switch (index) { 1272 case 0: return 63; 1273 case 1: return 72; 1274 default: return 0; 1275 } 1276 } 1277 1278 return ilk_get_aux_clock_divider(intel_dp, index); 1279 } 1280 1281 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 1282 { 1283 /* 1284 * SKL doesn't need us to program the AUX clock divider (Hardware will 1285 * derive the clock from CDCLK automatically). We still implement the 1286 * get_aux_clock_divider vfunc to plug-in into the existing code. 1287 */ 1288 return index ? 0 : 1; 1289 } 1290 1291 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, 1292 int send_bytes, 1293 u32 aux_clock_divider) 1294 { 1295 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1296 struct drm_i915_private *dev_priv = 1297 to_i915(intel_dig_port->base.base.dev); 1298 u32 precharge, timeout; 1299 1300 if (IS_GEN(dev_priv, 6)) 1301 precharge = 3; 1302 else 1303 precharge = 5; 1304 1305 if (IS_BROADWELL(dev_priv)) 1306 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 1307 else 1308 timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 1309 1310 return DP_AUX_CH_CTL_SEND_BUSY | 1311 DP_AUX_CH_CTL_DONE | 1312 DP_AUX_CH_CTL_INTERRUPT | 1313 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1314 timeout | 1315 DP_AUX_CH_CTL_RECEIVE_ERROR | 1316 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1317 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1318 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 1319 } 1320 1321 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, 1322 int send_bytes, 1323 u32 unused) 1324 { 1325 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1326 struct drm_i915_private *i915 = 1327 to_i915(intel_dig_port->base.base.dev); 1328 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); 1329 u32 ret; 1330 1331 ret = DP_AUX_CH_CTL_SEND_BUSY | 1332 DP_AUX_CH_CTL_DONE | 1333 DP_AUX_CH_CTL_INTERRUPT | 1334 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1335 DP_AUX_CH_CTL_TIME_OUT_MAX | 1336 DP_AUX_CH_CTL_RECEIVE_ERROR | 1337 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1338 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | 1339 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 1340 1341 if (intel_phy_is_tc(i915, phy) && 1342 intel_dig_port->tc_mode == TC_PORT_TBT_ALT) 1343 ret |= DP_AUX_CH_CTL_TBT_IO; 1344 1345 return ret; 1346 } 1347 1348 static int 1349 intel_dp_aux_xfer(struct intel_dp *intel_dp, 1350 const u8 *send, int send_bytes, 1351 u8 *recv, int recv_size, 1352 u32 aux_send_ctl_flags) 1353 { 1354 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1355 struct drm_i915_private *i915 = 1356 to_i915(intel_dig_port->base.base.dev); 1357 struct intel_uncore *uncore = &i915->uncore; 1358 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); 1359 bool is_tc_port = intel_phy_is_tc(i915, phy); 1360 i915_reg_t ch_ctl, ch_data[5]; 1361 u32 aux_clock_divider; 1362 enum intel_display_power_domain aux_domain; 1363 intel_wakeref_t aux_wakeref; 1364 intel_wakeref_t pps_wakeref; 1365 int i, ret, recv_bytes; 1366 int try, clock = 0; 1367 u32 status; 1368 bool vdd; 1369 1370 ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); 1371 for (i = 0; i < ARRAY_SIZE(ch_data); i++) 1372 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); 1373 1374 if (is_tc_port) 1375 intel_tc_port_lock(intel_dig_port); 1376 1377 aux_domain = intel_aux_power_domain(intel_dig_port); 1378 1379 aux_wakeref = intel_display_power_get(i915, aux_domain); 1380 pps_wakeref = pps_lock(intel_dp); 1381 1382 /* 1383 * We will be called with VDD already enabled for dpcd/edid/oui reads. 1384 * In such cases we want to leave VDD enabled and it's up to upper layers 1385 * to turn it off. But for eg. i2c-dev access we need to turn it on/off 1386 * ourselves. 1387 */ 1388 vdd = edp_panel_vdd_on(intel_dp); 1389 1390 /* dp aux is extremely sensitive to irq latency, hence request the 1391 * lowest possible wakeup latency and so prevent the cpu from going into 1392 * deep sleep states. 1393 */ 1394 cpu_latency_qos_update_request(&i915->pm_qos, 0); 1395 1396 intel_dp_check_edp(intel_dp); 1397 1398 /* Try to wait for any previous AUX channel activity */ 1399 for (try = 0; try < 3; try++) { 1400 status = intel_uncore_read_notrace(uncore, ch_ctl); 1401 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 1402 break; 1403 msleep(1); 1404 } 1405 /* just trace the final value */ 1406 trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); 1407 1408 if (try == 3) { 1409 const u32 status = intel_uncore_read(uncore, ch_ctl); 1410 1411 if (status != intel_dp->aux_busy_last_status) { 1412 drm_WARN(&i915->drm, 1, 1413 "%s: not started (status 0x%08x)\n", 1414 intel_dp->aux.name, status); 1415 intel_dp->aux_busy_last_status = status; 1416 } 1417 1418 ret = -EBUSY; 1419 goto out; 1420 } 1421 1422 /* Only 5 data registers! */ 1423 if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) { 1424 ret = -E2BIG; 1425 goto out; 1426 } 1427 1428 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { 1429 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, 1430 send_bytes, 1431 aux_clock_divider); 1432 1433 send_ctl |= aux_send_ctl_flags; 1434 1435 /* Must try at least 3 times according to DP spec */ 1436 for (try = 0; try < 5; try++) { 1437 /* Load the send data into the aux channel data registers */ 1438 for (i = 0; i < send_bytes; i += 4) 1439 intel_uncore_write(uncore, 1440 ch_data[i >> 2], 1441 intel_dp_pack_aux(send + i, 1442 send_bytes - i)); 1443 1444 /* Send the command and wait for it to complete */ 1445 intel_uncore_write(uncore, ch_ctl, send_ctl); 1446 1447 status = intel_dp_aux_wait_done(intel_dp); 1448 1449 /* Clear done status and any errors */ 1450 intel_uncore_write(uncore, 1451 ch_ctl, 1452 status | 1453 DP_AUX_CH_CTL_DONE | 1454 DP_AUX_CH_CTL_TIME_OUT_ERROR | 1455 DP_AUX_CH_CTL_RECEIVE_ERROR); 1456 1457 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2 1458 * 400us delay required for errors and timeouts 1459 * Timeout errors from the HW already meet this 1460 * requirement so skip to next iteration 1461 */ 1462 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) 1463 continue; 1464 1465 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 1466 usleep_range(400, 500); 1467 continue; 1468 } 1469 if (status & DP_AUX_CH_CTL_DONE) 1470 goto done; 1471 } 1472 } 1473 1474 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 1475 drm_err(&i915->drm, "%s: not done (status 0x%08x)\n", 1476 intel_dp->aux.name, status); 1477 ret = -EBUSY; 1478 goto out; 1479 } 1480 1481 done: 1482 /* Check for timeout or receive error. 1483 * Timeouts occur when the sink is not connected 1484 */ 1485 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 1486 drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n", 1487 intel_dp->aux.name, status); 1488 ret = -EIO; 1489 goto out; 1490 } 1491 1492 /* Timeouts occur when the device isn't connected, so they're 1493 * "normal" -- don't fill the kernel log with these */ 1494 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 1495 drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n", 1496 intel_dp->aux.name, status); 1497 ret = -ETIMEDOUT; 1498 goto out; 1499 } 1500 1501 /* Unload any bytes sent back from the other side */ 1502 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 1503 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 1504 1505 /* 1506 * By BSpec: "Message sizes of 0 or >20 are not allowed." 1507 * We have no idea of what happened so we return -EBUSY so 1508 * drm layer takes care for the necessary retries. 1509 */ 1510 if (recv_bytes == 0 || recv_bytes > 20) { 1511 drm_dbg_kms(&i915->drm, 1512 "%s: Forbidden recv_bytes = %d on aux transaction\n", 1513 intel_dp->aux.name, recv_bytes); 1514 ret = -EBUSY; 1515 goto out; 1516 } 1517 1518 if (recv_bytes > recv_size) 1519 recv_bytes = recv_size; 1520 1521 for (i = 0; i < recv_bytes; i += 4) 1522 intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]), 1523 recv + i, recv_bytes - i); 1524 1525 ret = recv_bytes; 1526 out: 1527 cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); 1528 1529 if (vdd) 1530 edp_panel_vdd_off(intel_dp, false); 1531 1532 pps_unlock(intel_dp, pps_wakeref); 1533 intel_display_power_put_async(i915, aux_domain, aux_wakeref); 1534 1535 if (is_tc_port) 1536 intel_tc_port_unlock(intel_dig_port); 1537 1538 return ret; 1539 } 1540 1541 #define BARE_ADDRESS_SIZE 3 1542 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) 1543 1544 static void 1545 intel_dp_aux_header(u8 txbuf[HEADER_SIZE], 1546 const struct drm_dp_aux_msg *msg) 1547 { 1548 txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf); 1549 txbuf[1] = (msg->address >> 8) & 0xff; 1550 txbuf[2] = msg->address & 0xff; 1551 txbuf[3] = msg->size - 1; 1552 } 1553 1554 static ssize_t 1555 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 1556 { 1557 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); 1558 u8 txbuf[20], rxbuf[20]; 1559 size_t txsize, rxsize; 1560 int ret; 1561 1562 intel_dp_aux_header(txbuf, msg); 1563 1564 switch (msg->request & ~DP_AUX_I2C_MOT) { 1565 case DP_AUX_NATIVE_WRITE: 1566 case DP_AUX_I2C_WRITE: 1567 case DP_AUX_I2C_WRITE_STATUS_UPDATE: 1568 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; 1569 rxsize = 2; /* 0 or 1 data bytes */ 1570 1571 if (WARN_ON(txsize > 20)) 1572 return -E2BIG; 1573 1574 WARN_ON(!msg->buffer != !msg->size); 1575 1576 if (msg->buffer) 1577 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); 1578 1579 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, 1580 rxbuf, rxsize, 0); 1581 if (ret > 0) { 1582 msg->reply = rxbuf[0] >> 4; 1583 1584 if (ret > 1) { 1585 /* Number of bytes written in a short write. */ 1586 ret = clamp_t(int, rxbuf[1], 0, msg->size); 1587 } else { 1588 /* Return payload size. */ 1589 ret = msg->size; 1590 } 1591 } 1592 break; 1593 1594 case DP_AUX_NATIVE_READ: 1595 case DP_AUX_I2C_READ: 1596 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; 1597 rxsize = msg->size + 1; 1598 1599 if (WARN_ON(rxsize > 20)) 1600 return -E2BIG; 1601 1602 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, 1603 rxbuf, rxsize, 0); 1604 if (ret > 0) { 1605 msg->reply = rxbuf[0] >> 4; 1606 /* 1607 * Assume happy day, and copy the data. The caller is 1608 * expected to check msg->reply before touching it. 1609 * 1610 * Return payload size. 1611 */ 1612 ret--; 1613 memcpy(msg->buffer, rxbuf + 1, ret); 1614 } 1615 break; 1616 1617 default: 1618 ret = -EINVAL; 1619 break; 1620 } 1621 1622 return ret; 1623 } 1624 1625 1626 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 1627 { 1628 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1629 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1630 enum aux_ch aux_ch = dig_port->aux_ch; 1631 1632 switch (aux_ch) { 1633 case AUX_CH_B: 1634 case AUX_CH_C: 1635 case AUX_CH_D: 1636 return DP_AUX_CH_CTL(aux_ch); 1637 default: 1638 MISSING_CASE(aux_ch); 1639 return DP_AUX_CH_CTL(AUX_CH_B); 1640 } 1641 } 1642 1643 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) 1644 { 1645 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1646 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1647 enum aux_ch aux_ch = dig_port->aux_ch; 1648 1649 switch (aux_ch) { 1650 case AUX_CH_B: 1651 case AUX_CH_C: 1652 case AUX_CH_D: 1653 return DP_AUX_CH_DATA(aux_ch, index); 1654 default: 1655 MISSING_CASE(aux_ch); 1656 return DP_AUX_CH_DATA(AUX_CH_B, index); 1657 } 1658 } 1659 1660 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 1661 { 1662 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1663 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1664 enum aux_ch aux_ch = dig_port->aux_ch; 1665 1666 switch (aux_ch) { 1667 case AUX_CH_A: 1668 return DP_AUX_CH_CTL(aux_ch); 1669 case AUX_CH_B: 1670 case AUX_CH_C: 1671 case AUX_CH_D: 1672 return PCH_DP_AUX_CH_CTL(aux_ch); 1673 default: 1674 MISSING_CASE(aux_ch); 1675 return DP_AUX_CH_CTL(AUX_CH_A); 1676 } 1677 } 1678 1679 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 1680 { 1681 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1682 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1683 enum aux_ch aux_ch = dig_port->aux_ch; 1684 1685 switch (aux_ch) { 1686 case AUX_CH_A: 1687 return DP_AUX_CH_DATA(aux_ch, index); 1688 case AUX_CH_B: 1689 case AUX_CH_C: 1690 case AUX_CH_D: 1691 return PCH_DP_AUX_CH_DATA(aux_ch, index); 1692 default: 1693 MISSING_CASE(aux_ch); 1694 return DP_AUX_CH_DATA(AUX_CH_A, index); 1695 } 1696 } 1697 1698 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 1699 { 1700 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1701 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1702 enum aux_ch aux_ch = dig_port->aux_ch; 1703 1704 switch (aux_ch) { 1705 case AUX_CH_A: 1706 case AUX_CH_B: 1707 case AUX_CH_C: 1708 case AUX_CH_D: 1709 case AUX_CH_E: 1710 case AUX_CH_F: 1711 case AUX_CH_G: 1712 return DP_AUX_CH_CTL(aux_ch); 1713 default: 1714 MISSING_CASE(aux_ch); 1715 return DP_AUX_CH_CTL(AUX_CH_A); 1716 } 1717 } 1718 1719 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 1720 { 1721 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1722 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1723 enum aux_ch aux_ch = dig_port->aux_ch; 1724 1725 switch (aux_ch) { 1726 case AUX_CH_A: 1727 case AUX_CH_B: 1728 case AUX_CH_C: 1729 case AUX_CH_D: 1730 case AUX_CH_E: 1731 case AUX_CH_F: 1732 case AUX_CH_G: 1733 return DP_AUX_CH_DATA(aux_ch, index); 1734 default: 1735 MISSING_CASE(aux_ch); 1736 return DP_AUX_CH_DATA(AUX_CH_A, index); 1737 } 1738 } 1739 1740 static void 1741 intel_dp_aux_fini(struct intel_dp *intel_dp) 1742 { 1743 kfree(intel_dp->aux.name); 1744 } 1745 1746 static void 1747 intel_dp_aux_init(struct intel_dp *intel_dp) 1748 { 1749 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1750 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1751 struct intel_encoder *encoder = &dig_port->base; 1752 1753 if (INTEL_GEN(dev_priv) >= 9) { 1754 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; 1755 intel_dp->aux_ch_data_reg = skl_aux_data_reg; 1756 } else if (HAS_PCH_SPLIT(dev_priv)) { 1757 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg; 1758 intel_dp->aux_ch_data_reg = ilk_aux_data_reg; 1759 } else { 1760 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg; 1761 intel_dp->aux_ch_data_reg = g4x_aux_data_reg; 1762 } 1763 1764 if (INTEL_GEN(dev_priv) >= 9) 1765 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; 1766 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 1767 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 1768 else if (HAS_PCH_SPLIT(dev_priv)) 1769 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; 1770 else 1771 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; 1772 1773 if (INTEL_GEN(dev_priv) >= 9) 1774 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; 1775 else 1776 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; 1777 1778 drm_dp_aux_init(&intel_dp->aux); 1779 1780 /* Failure to allocate our preferred name is not critical */ 1781 intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c", 1782 aux_ch_name(dig_port->aux_ch), 1783 port_name(encoder->port)); 1784 intel_dp->aux.transfer = intel_dp_aux_transfer; 1785 } 1786 1787 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) 1788 { 1789 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; 1790 1791 return max_rate >= 540000; 1792 } 1793 1794 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp) 1795 { 1796 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; 1797 1798 return max_rate >= 810000; 1799 } 1800 1801 static void 1802 intel_dp_set_clock(struct intel_encoder *encoder, 1803 struct intel_crtc_state *pipe_config) 1804 { 1805 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1806 const struct dp_link_dpll *divisor = NULL; 1807 int i, count = 0; 1808 1809 if (IS_G4X(dev_priv)) { 1810 divisor = g4x_dpll; 1811 count = ARRAY_SIZE(g4x_dpll); 1812 } else if (HAS_PCH_SPLIT(dev_priv)) { 1813 divisor = pch_dpll; 1814 count = ARRAY_SIZE(pch_dpll); 1815 } else if (IS_CHERRYVIEW(dev_priv)) { 1816 divisor = chv_dpll; 1817 count = ARRAY_SIZE(chv_dpll); 1818 } else if (IS_VALLEYVIEW(dev_priv)) { 1819 divisor = vlv_dpll; 1820 count = ARRAY_SIZE(vlv_dpll); 1821 } 1822 1823 if (divisor && count) { 1824 for (i = 0; i < count; i++) { 1825 if (pipe_config->port_clock == divisor[i].clock) { 1826 pipe_config->dpll = divisor[i].dpll; 1827 pipe_config->clock_set = true; 1828 break; 1829 } 1830 } 1831 } 1832 } 1833 1834 static void snprintf_int_array(char *str, size_t len, 1835 const int *array, int nelem) 1836 { 1837 int i; 1838 1839 str[0] = '\0'; 1840 1841 for (i = 0; i < nelem; i++) { 1842 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); 1843 if (r >= len) 1844 return; 1845 str += r; 1846 len -= r; 1847 } 1848 } 1849 1850 static void intel_dp_print_rates(struct intel_dp *intel_dp) 1851 { 1852 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1853 char str[128]; /* FIXME: too big for stack? */ 1854 1855 if (!drm_debug_enabled(DRM_UT_KMS)) 1856 return; 1857 1858 snprintf_int_array(str, sizeof(str), 1859 intel_dp->source_rates, intel_dp->num_source_rates); 1860 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1861 1862 snprintf_int_array(str, sizeof(str), 1863 intel_dp->sink_rates, intel_dp->num_sink_rates); 1864 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1865 1866 snprintf_int_array(str, sizeof(str), 1867 intel_dp->common_rates, intel_dp->num_common_rates); 1868 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1869 } 1870 1871 int 1872 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1873 { 1874 int len; 1875 1876 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1877 if (WARN_ON(len <= 0)) 1878 return 162000; 1879 1880 return intel_dp->common_rates[len - 1]; 1881 } 1882 1883 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1884 { 1885 int i = intel_dp_rate_index(intel_dp->sink_rates, 1886 intel_dp->num_sink_rates, rate); 1887 1888 if (WARN_ON(i < 0)) 1889 i = 0; 1890 1891 return i; 1892 } 1893 1894 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1895 u8 *link_bw, u8 *rate_select) 1896 { 1897 /* eDP 1.4 rate select method. */ 1898 if (intel_dp->use_rate_select) { 1899 *link_bw = 0; 1900 *rate_select = 1901 intel_dp_rate_select(intel_dp, port_clock); 1902 } else { 1903 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1904 *rate_select = 0; 1905 } 1906 } 1907 1908 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1909 const struct intel_crtc_state *pipe_config) 1910 { 1911 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1912 1913 /* On TGL, FEC is supported on all Pipes */ 1914 if (INTEL_GEN(dev_priv) >= 12) 1915 return true; 1916 1917 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) 1918 return true; 1919 1920 return false; 1921 } 1922 1923 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1924 const struct intel_crtc_state *pipe_config) 1925 { 1926 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1927 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1928 } 1929 1930 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1931 const struct intel_crtc_state *crtc_state) 1932 { 1933 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1934 1935 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) 1936 return false; 1937 1938 return intel_dsc_source_support(encoder, crtc_state) && 1939 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1940 } 1941 1942 static int intel_dp_compute_bpp(struct intel_dp *intel_dp, 1943 struct intel_crtc_state *pipe_config) 1944 { 1945 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1946 struct intel_connector *intel_connector = intel_dp->attached_connector; 1947 int bpp, bpc; 1948 1949 bpp = pipe_config->pipe_bpp; 1950 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); 1951 1952 if (bpc > 0) 1953 bpp = min(bpp, 3*bpc); 1954 1955 if (intel_dp_is_edp(intel_dp)) { 1956 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1957 if (intel_connector->base.display_info.bpc == 0 && 1958 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 1959 drm_dbg_kms(&dev_priv->drm, 1960 "clamping bpp for eDP panel to BIOS-provided %i\n", 1961 dev_priv->vbt.edp.bpp); 1962 bpp = dev_priv->vbt.edp.bpp; 1963 } 1964 } 1965 1966 return bpp; 1967 } 1968 1969 /* Adjust link config limits based on compliance test requests. */ 1970 void 1971 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1972 struct intel_crtc_state *pipe_config, 1973 struct link_config_limits *limits) 1974 { 1975 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1976 1977 /* For DP Compliance we override the computed bpp for the pipe */ 1978 if (intel_dp->compliance.test_data.bpc != 0) { 1979 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1980 1981 limits->min_bpp = limits->max_bpp = bpp; 1982 pipe_config->dither_force_disable = bpp == 6 * 3; 1983 1984 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1985 } 1986 1987 /* Use values requested by Compliance Test Request */ 1988 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1989 int index; 1990 1991 /* Validate the compliance test data since max values 1992 * might have changed due to link train fallback. 1993 */ 1994 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1995 intel_dp->compliance.test_lane_count)) { 1996 index = intel_dp_rate_index(intel_dp->common_rates, 1997 intel_dp->num_common_rates, 1998 intel_dp->compliance.test_link_rate); 1999 if (index >= 0) 2000 limits->min_clock = limits->max_clock = index; 2001 limits->min_lane_count = limits->max_lane_count = 2002 intel_dp->compliance.test_lane_count; 2003 } 2004 } 2005 } 2006 2007 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 2008 { 2009 /* 2010 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 2011 * format of the number of bytes per pixel will be half the number 2012 * of bytes of RGB pixel. 2013 */ 2014 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2015 bpp /= 2; 2016 2017 return bpp; 2018 } 2019 2020 /* Optimize link config in order: max bpp, min clock, min lanes */ 2021 static int 2022 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2023 struct intel_crtc_state *pipe_config, 2024 const struct link_config_limits *limits) 2025 { 2026 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2027 int bpp, clock, lane_count; 2028 int mode_rate, link_clock, link_avail; 2029 2030 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2031 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2032 2033 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2034 output_bpp); 2035 2036 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2037 for (lane_count = limits->min_lane_count; 2038 lane_count <= limits->max_lane_count; 2039 lane_count <<= 1) { 2040 link_clock = intel_dp->common_rates[clock]; 2041 link_avail = intel_dp_max_data_rate(link_clock, 2042 lane_count); 2043 2044 if (mode_rate <= link_avail) { 2045 pipe_config->lane_count = lane_count; 2046 pipe_config->pipe_bpp = bpp; 2047 pipe_config->port_clock = link_clock; 2048 2049 return 0; 2050 } 2051 } 2052 } 2053 } 2054 2055 return -EINVAL; 2056 } 2057 2058 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2059 { 2060 int i, num_bpc; 2061 u8 dsc_bpc[3] = {0}; 2062 2063 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2064 dsc_bpc); 2065 for (i = 0; i < num_bpc; i++) { 2066 if (dsc_max_bpc >= dsc_bpc[i]) 2067 return dsc_bpc[i] * 3; 2068 } 2069 2070 return 0; 2071 } 2072 2073 #define DSC_SUPPORTED_VERSION_MIN 1 2074 2075 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2076 struct intel_crtc_state *crtc_state) 2077 { 2078 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2079 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2080 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2081 u8 line_buf_depth; 2082 int ret; 2083 2084 ret = intel_dsc_compute_params(encoder, crtc_state); 2085 if (ret) 2086 return ret; 2087 2088 /* 2089 * Slice Height of 8 works for all currently available panels. So start 2090 * with that if pic_height is an integral multiple of 8. Eventually add 2091 * logic to try multiple slice heights. 2092 */ 2093 if (vdsc_cfg->pic_height % 8 == 0) 2094 vdsc_cfg->slice_height = 8; 2095 else if (vdsc_cfg->pic_height % 4 == 0) 2096 vdsc_cfg->slice_height = 4; 2097 else 2098 vdsc_cfg->slice_height = 2; 2099 2100 vdsc_cfg->dsc_version_major = 2101 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2102 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2103 vdsc_cfg->dsc_version_minor = 2104 min(DSC_SUPPORTED_VERSION_MIN, 2105 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2106 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2107 2108 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2109 DP_DSC_RGB; 2110 2111 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2112 if (!line_buf_depth) { 2113 drm_dbg_kms(&i915->drm, 2114 "DSC Sink Line Buffer Depth invalid\n"); 2115 return -EINVAL; 2116 } 2117 2118 if (vdsc_cfg->dsc_version_minor == 2) 2119 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2120 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2121 else 2122 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2123 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2124 2125 vdsc_cfg->block_pred_enable = 2126 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2127 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2128 2129 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2130 } 2131 2132 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2133 struct intel_crtc_state *pipe_config, 2134 struct drm_connector_state *conn_state, 2135 struct link_config_limits *limits) 2136 { 2137 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2138 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2139 const struct drm_display_mode *adjusted_mode = 2140 &pipe_config->hw.adjusted_mode; 2141 u8 dsc_max_bpc; 2142 int pipe_bpp; 2143 int ret; 2144 2145 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2146 intel_dp_supports_fec(intel_dp, pipe_config); 2147 2148 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2149 return -EINVAL; 2150 2151 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2152 if (INTEL_GEN(dev_priv) >= 12) 2153 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2154 else 2155 dsc_max_bpc = min_t(u8, 10, 2156 conn_state->max_requested_bpc); 2157 2158 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2159 2160 /* Min Input BPC for ICL+ is 8 */ 2161 if (pipe_bpp < 8 * 3) { 2162 drm_dbg_kms(&dev_priv->drm, 2163 "No DSC support for less than 8bpc\n"); 2164 return -EINVAL; 2165 } 2166 2167 /* 2168 * For now enable DSC for max bpp, max link rate, max lane count. 2169 * Optimize this later for the minimum possible link rate/lane count 2170 * with DSC enabled for the requested mode. 2171 */ 2172 pipe_config->pipe_bpp = pipe_bpp; 2173 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2174 pipe_config->lane_count = limits->max_lane_count; 2175 2176 if (intel_dp_is_edp(intel_dp)) { 2177 pipe_config->dsc.compressed_bpp = 2178 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2179 pipe_config->pipe_bpp); 2180 pipe_config->dsc.slice_count = 2181 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2182 true); 2183 } else { 2184 u16 dsc_max_output_bpp; 2185 u8 dsc_dp_slice_count; 2186 2187 dsc_max_output_bpp = 2188 intel_dp_dsc_get_output_bpp(dev_priv, 2189 pipe_config->port_clock, 2190 pipe_config->lane_count, 2191 adjusted_mode->crtc_clock, 2192 adjusted_mode->crtc_hdisplay); 2193 dsc_dp_slice_count = 2194 intel_dp_dsc_get_slice_count(intel_dp, 2195 adjusted_mode->crtc_clock, 2196 adjusted_mode->crtc_hdisplay); 2197 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2198 drm_dbg_kms(&dev_priv->drm, 2199 "Compressed BPP/Slice Count not supported\n"); 2200 return -EINVAL; 2201 } 2202 pipe_config->dsc.compressed_bpp = min_t(u16, 2203 dsc_max_output_bpp >> 4, 2204 pipe_config->pipe_bpp); 2205 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2206 } 2207 /* 2208 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2209 * is greater than the maximum Cdclock and if slice count is even 2210 * then we need to use 2 VDSC instances. 2211 */ 2212 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2213 if (pipe_config->dsc.slice_count > 1) { 2214 pipe_config->dsc.dsc_split = true; 2215 } else { 2216 drm_dbg_kms(&dev_priv->drm, 2217 "Cannot split stream to use 2 VDSC instances\n"); 2218 return -EINVAL; 2219 } 2220 } 2221 2222 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2223 if (ret < 0) { 2224 drm_dbg_kms(&dev_priv->drm, 2225 "Cannot compute valid DSC parameters for Input Bpp = %d " 2226 "Compressed BPP = %d\n", 2227 pipe_config->pipe_bpp, 2228 pipe_config->dsc.compressed_bpp); 2229 return ret; 2230 } 2231 2232 pipe_config->dsc.compression_enable = true; 2233 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2234 "Compressed Bpp = %d Slice Count = %d\n", 2235 pipe_config->pipe_bpp, 2236 pipe_config->dsc.compressed_bpp, 2237 pipe_config->dsc.slice_count); 2238 2239 return 0; 2240 } 2241 2242 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2243 { 2244 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2245 return 6 * 3; 2246 else 2247 return 8 * 3; 2248 } 2249 2250 static int 2251 intel_dp_compute_link_config(struct intel_encoder *encoder, 2252 struct intel_crtc_state *pipe_config, 2253 struct drm_connector_state *conn_state) 2254 { 2255 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2256 const struct drm_display_mode *adjusted_mode = 2257 &pipe_config->hw.adjusted_mode; 2258 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2259 struct link_config_limits limits; 2260 int common_len; 2261 int ret; 2262 2263 common_len = intel_dp_common_len_rate_limit(intel_dp, 2264 intel_dp->max_link_rate); 2265 2266 /* No common link rates between source and sink */ 2267 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2268 2269 limits.min_clock = 0; 2270 limits.max_clock = common_len - 1; 2271 2272 limits.min_lane_count = 1; 2273 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2274 2275 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2276 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2277 2278 if (intel_dp_is_edp(intel_dp)) { 2279 /* 2280 * Use the maximum clock and number of lanes the eDP panel 2281 * advertizes being capable of. The panels are generally 2282 * designed to support only a single clock and lane 2283 * configuration, and typically these values correspond to the 2284 * native resolution of the panel. 2285 */ 2286 limits.min_lane_count = limits.max_lane_count; 2287 limits.min_clock = limits.max_clock; 2288 } 2289 2290 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2291 2292 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 2293 "max rate %d max bpp %d pixel clock %iKHz\n", 2294 limits.max_lane_count, 2295 intel_dp->common_rates[limits.max_clock], 2296 limits.max_bpp, adjusted_mode->crtc_clock); 2297 2298 /* 2299 * Optimize for slow and wide. This is the place to add alternative 2300 * optimization policy. 2301 */ 2302 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2303 2304 /* enable compression if the mode doesn't fit available BW */ 2305 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 2306 if (ret || intel_dp->force_dsc_en) { 2307 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2308 conn_state, &limits); 2309 if (ret < 0) 2310 return ret; 2311 } 2312 2313 if (pipe_config->dsc.compression_enable) { 2314 drm_dbg_kms(&i915->drm, 2315 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2316 pipe_config->lane_count, pipe_config->port_clock, 2317 pipe_config->pipe_bpp, 2318 pipe_config->dsc.compressed_bpp); 2319 2320 drm_dbg_kms(&i915->drm, 2321 "DP link rate required %i available %i\n", 2322 intel_dp_link_required(adjusted_mode->crtc_clock, 2323 pipe_config->dsc.compressed_bpp), 2324 intel_dp_max_data_rate(pipe_config->port_clock, 2325 pipe_config->lane_count)); 2326 } else { 2327 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 2328 pipe_config->lane_count, pipe_config->port_clock, 2329 pipe_config->pipe_bpp); 2330 2331 drm_dbg_kms(&i915->drm, 2332 "DP link rate required %i available %i\n", 2333 intel_dp_link_required(adjusted_mode->crtc_clock, 2334 pipe_config->pipe_bpp), 2335 intel_dp_max_data_rate(pipe_config->port_clock, 2336 pipe_config->lane_count)); 2337 } 2338 return 0; 2339 } 2340 2341 static int 2342 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2343 struct intel_crtc_state *crtc_state, 2344 const struct drm_connector_state *conn_state) 2345 { 2346 struct drm_connector *connector = conn_state->connector; 2347 const struct drm_display_info *info = &connector->display_info; 2348 const struct drm_display_mode *adjusted_mode = 2349 &crtc_state->hw.adjusted_mode; 2350 2351 if (!drm_mode_is_420_only(info, adjusted_mode) || 2352 !intel_dp_get_colorimetry_status(intel_dp) || 2353 !connector->ycbcr_420_allowed) 2354 return 0; 2355 2356 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2357 2358 return intel_pch_panel_fitting(crtc_state, conn_state); 2359 } 2360 2361 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2362 const struct drm_connector_state *conn_state) 2363 { 2364 const struct intel_digital_connector_state *intel_conn_state = 2365 to_intel_digital_connector_state(conn_state); 2366 const struct drm_display_mode *adjusted_mode = 2367 &crtc_state->hw.adjusted_mode; 2368 2369 /* 2370 * Our YCbCr output is always limited range. 2371 * crtc_state->limited_color_range only applies to RGB, 2372 * and it must never be set for YCbCr or we risk setting 2373 * some conflicting bits in PIPECONF which will mess up 2374 * the colors on the monitor. 2375 */ 2376 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2377 return false; 2378 2379 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2380 /* 2381 * See: 2382 * CEA-861-E - 5.1 Default Encoding Parameters 2383 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2384 */ 2385 return crtc_state->pipe_bpp != 18 && 2386 drm_default_rgb_quant_range(adjusted_mode) == 2387 HDMI_QUANTIZATION_RANGE_LIMITED; 2388 } else { 2389 return intel_conn_state->broadcast_rgb == 2390 INTEL_BROADCAST_RGB_LIMITED; 2391 } 2392 } 2393 2394 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2395 enum port port) 2396 { 2397 if (IS_G4X(dev_priv)) 2398 return false; 2399 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2400 return false; 2401 2402 return true; 2403 } 2404 2405 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2406 const struct drm_connector_state *conn_state, 2407 struct drm_dp_vsc_sdp *vsc) 2408 { 2409 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2410 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2411 2412 /* 2413 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2414 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2415 * Colorimetry Format indication. 2416 */ 2417 vsc->revision = 0x5; 2418 vsc->length = 0x13; 2419 2420 /* DP 1.4a spec, Table 2-120 */ 2421 switch (crtc_state->output_format) { 2422 case INTEL_OUTPUT_FORMAT_YCBCR444: 2423 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2424 break; 2425 case INTEL_OUTPUT_FORMAT_YCBCR420: 2426 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2427 break; 2428 case INTEL_OUTPUT_FORMAT_RGB: 2429 default: 2430 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2431 } 2432 2433 switch (conn_state->colorspace) { 2434 case DRM_MODE_COLORIMETRY_BT709_YCC: 2435 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2436 break; 2437 case DRM_MODE_COLORIMETRY_XVYCC_601: 2438 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2439 break; 2440 case DRM_MODE_COLORIMETRY_XVYCC_709: 2441 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2442 break; 2443 case DRM_MODE_COLORIMETRY_SYCC_601: 2444 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2445 break; 2446 case DRM_MODE_COLORIMETRY_OPYCC_601: 2447 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2448 break; 2449 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2450 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2451 break; 2452 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2453 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2454 break; 2455 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2456 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2457 break; 2458 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2459 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2460 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2461 break; 2462 default: 2463 /* 2464 * RGB->YCBCR color conversion uses the BT.709 2465 * color space. 2466 */ 2467 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2468 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2469 else 2470 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2471 break; 2472 } 2473 2474 vsc->bpc = crtc_state->pipe_bpp / 3; 2475 2476 /* only RGB pixelformat supports 6 bpc */ 2477 drm_WARN_ON(&dev_priv->drm, 2478 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2479 2480 /* all YCbCr are always limited range */ 2481 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2482 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2483 } 2484 2485 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2486 struct intel_crtc_state *crtc_state, 2487 const struct drm_connector_state *conn_state) 2488 { 2489 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 2490 2491 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 2492 if (crtc_state->has_psr) 2493 return; 2494 2495 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 2496 return; 2497 2498 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2499 vsc->sdp_type = DP_SDP_VSC; 2500 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2501 &crtc_state->infoframes.vsc); 2502 } 2503 2504 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 2505 const struct intel_crtc_state *crtc_state, 2506 const struct drm_connector_state *conn_state, 2507 struct drm_dp_vsc_sdp *vsc) 2508 { 2509 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2510 2511 vsc->sdp_type = DP_SDP_VSC; 2512 2513 if (dev_priv->psr.psr2_enabled) { 2514 if (dev_priv->psr.colorimetry_support && 2515 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2516 /* [PSR2, +Colorimetry] */ 2517 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2518 vsc); 2519 } else { 2520 /* 2521 * [PSR2, -Colorimetry] 2522 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2523 * 3D stereo + PSR/PSR2 + Y-coordinate. 2524 */ 2525 vsc->revision = 0x4; 2526 vsc->length = 0xe; 2527 } 2528 } else { 2529 /* 2530 * [PSR1] 2531 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2532 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2533 * higher). 2534 */ 2535 vsc->revision = 0x2; 2536 vsc->length = 0x8; 2537 } 2538 } 2539 2540 static void 2541 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2542 struct intel_crtc_state *crtc_state, 2543 const struct drm_connector_state *conn_state) 2544 { 2545 int ret; 2546 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2547 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2548 2549 if (!conn_state->hdr_output_metadata) 2550 return; 2551 2552 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2553 2554 if (ret) { 2555 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2556 return; 2557 } 2558 2559 crtc_state->infoframes.enable |= 2560 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2561 } 2562 2563 int 2564 intel_dp_compute_config(struct intel_encoder *encoder, 2565 struct intel_crtc_state *pipe_config, 2566 struct drm_connector_state *conn_state) 2567 { 2568 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2569 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2570 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2571 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2572 enum port port = encoder->port; 2573 struct intel_connector *intel_connector = intel_dp->attached_connector; 2574 struct intel_digital_connector_state *intel_conn_state = 2575 to_intel_digital_connector_state(conn_state); 2576 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2577 DP_DPCD_QUIRK_CONSTANT_N); 2578 int ret = 0, output_bpp; 2579 2580 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2581 pipe_config->has_pch_encoder = true; 2582 2583 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2584 2585 if (lspcon->active) 2586 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2587 else 2588 ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, 2589 conn_state); 2590 if (ret) 2591 return ret; 2592 2593 pipe_config->has_drrs = false; 2594 if (!intel_dp_port_has_audio(dev_priv, port)) 2595 pipe_config->has_audio = false; 2596 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2597 pipe_config->has_audio = intel_dp->has_audio; 2598 else 2599 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2600 2601 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2602 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2603 adjusted_mode); 2604 2605 if (HAS_GMCH(dev_priv)) 2606 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 2607 else 2608 ret = intel_pch_panel_fitting(pipe_config, conn_state); 2609 if (ret) 2610 return ret; 2611 } 2612 2613 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2614 return -EINVAL; 2615 2616 if (HAS_GMCH(dev_priv) && 2617 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2618 return -EINVAL; 2619 2620 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2621 return -EINVAL; 2622 2623 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2624 return -EINVAL; 2625 2626 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2627 if (ret < 0) 2628 return ret; 2629 2630 pipe_config->limited_color_range = 2631 intel_dp_limited_color_range(pipe_config, conn_state); 2632 2633 if (pipe_config->dsc.compression_enable) 2634 output_bpp = pipe_config->dsc.compressed_bpp; 2635 else 2636 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2637 2638 intel_link_compute_m_n(output_bpp, 2639 pipe_config->lane_count, 2640 adjusted_mode->crtc_clock, 2641 pipe_config->port_clock, 2642 &pipe_config->dp_m_n, 2643 constant_n, pipe_config->fec_enable); 2644 2645 if (intel_connector->panel.downclock_mode != NULL && 2646 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2647 pipe_config->has_drrs = true; 2648 intel_link_compute_m_n(output_bpp, 2649 pipe_config->lane_count, 2650 intel_connector->panel.downclock_mode->clock, 2651 pipe_config->port_clock, 2652 &pipe_config->dp_m2_n2, 2653 constant_n, pipe_config->fec_enable); 2654 } 2655 2656 if (!HAS_DDI(dev_priv)) 2657 intel_dp_set_clock(encoder, pipe_config); 2658 2659 intel_psr_compute_config(intel_dp, pipe_config); 2660 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2661 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2662 2663 return 0; 2664 } 2665 2666 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2667 int link_rate, u8 lane_count, 2668 bool link_mst) 2669 { 2670 intel_dp->link_trained = false; 2671 intel_dp->link_rate = link_rate; 2672 intel_dp->lane_count = lane_count; 2673 intel_dp->link_mst = link_mst; 2674 } 2675 2676 static void intel_dp_prepare(struct intel_encoder *encoder, 2677 const struct intel_crtc_state *pipe_config) 2678 { 2679 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2680 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2681 enum port port = encoder->port; 2682 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2683 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2684 2685 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2686 pipe_config->lane_count, 2687 intel_crtc_has_type(pipe_config, 2688 INTEL_OUTPUT_DP_MST)); 2689 2690 /* 2691 * There are four kinds of DP registers: 2692 * 2693 * IBX PCH 2694 * SNB CPU 2695 * IVB CPU 2696 * CPT PCH 2697 * 2698 * IBX PCH and CPU are the same for almost everything, 2699 * except that the CPU DP PLL is configured in this 2700 * register 2701 * 2702 * CPT PCH is quite different, having many bits moved 2703 * to the TRANS_DP_CTL register instead. That 2704 * configuration happens (oddly) in ilk_pch_enable 2705 */ 2706 2707 /* Preserve the BIOS-computed detected bit. This is 2708 * supposed to be read-only. 2709 */ 2710 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2711 2712 /* Handle DP bits in common between all three register formats */ 2713 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2714 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2715 2716 /* Split out the IBX/CPU vs CPT settings */ 2717 2718 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2719 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2720 intel_dp->DP |= DP_SYNC_HS_HIGH; 2721 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2722 intel_dp->DP |= DP_SYNC_VS_HIGH; 2723 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2724 2725 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2726 intel_dp->DP |= DP_ENHANCED_FRAMING; 2727 2728 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2729 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2730 u32 trans_dp; 2731 2732 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2733 2734 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2735 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2736 trans_dp |= TRANS_DP_ENH_FRAMING; 2737 else 2738 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2739 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2740 } else { 2741 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2742 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2743 2744 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2745 intel_dp->DP |= DP_SYNC_HS_HIGH; 2746 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2747 intel_dp->DP |= DP_SYNC_VS_HIGH; 2748 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2749 2750 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2751 intel_dp->DP |= DP_ENHANCED_FRAMING; 2752 2753 if (IS_CHERRYVIEW(dev_priv)) 2754 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2755 else 2756 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2757 } 2758 } 2759 2760 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2761 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2762 2763 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2764 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2765 2766 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2767 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2768 2769 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2770 2771 static void wait_panel_status(struct intel_dp *intel_dp, 2772 u32 mask, 2773 u32 value) 2774 { 2775 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2776 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2777 2778 lockdep_assert_held(&dev_priv->pps_mutex); 2779 2780 intel_pps_verify_state(intel_dp); 2781 2782 pp_stat_reg = _pp_stat_reg(intel_dp); 2783 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2784 2785 drm_dbg_kms(&dev_priv->drm, 2786 "mask %08x value %08x status %08x control %08x\n", 2787 mask, value, 2788 intel_de_read(dev_priv, pp_stat_reg), 2789 intel_de_read(dev_priv, pp_ctrl_reg)); 2790 2791 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2792 mask, value, 5000)) 2793 drm_err(&dev_priv->drm, 2794 "Panel status timeout: status %08x control %08x\n", 2795 intel_de_read(dev_priv, pp_stat_reg), 2796 intel_de_read(dev_priv, pp_ctrl_reg)); 2797 2798 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2799 } 2800 2801 static void wait_panel_on(struct intel_dp *intel_dp) 2802 { 2803 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2804 2805 drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); 2806 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2807 } 2808 2809 static void wait_panel_off(struct intel_dp *intel_dp) 2810 { 2811 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2812 2813 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n"); 2814 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2815 } 2816 2817 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2818 { 2819 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2820 ktime_t panel_power_on_time; 2821 s64 panel_power_off_duration; 2822 2823 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n"); 2824 2825 /* take the difference of currrent time and panel power off time 2826 * and then make panel wait for t11_t12 if needed. */ 2827 panel_power_on_time = ktime_get_boottime(); 2828 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2829 2830 /* When we disable the VDD override bit last we have to do the manual 2831 * wait. */ 2832 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2833 wait_remaining_ms_from_jiffies(jiffies, 2834 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2835 2836 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2837 } 2838 2839 static void wait_backlight_on(struct intel_dp *intel_dp) 2840 { 2841 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2842 intel_dp->backlight_on_delay); 2843 } 2844 2845 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2846 { 2847 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2848 intel_dp->backlight_off_delay); 2849 } 2850 2851 /* Read the current pp_control value, unlocking the register if it 2852 * is locked 2853 */ 2854 2855 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2856 { 2857 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2858 u32 control; 2859 2860 lockdep_assert_held(&dev_priv->pps_mutex); 2861 2862 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 2863 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 2864 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2865 control &= ~PANEL_UNLOCK_MASK; 2866 control |= PANEL_UNLOCK_REGS; 2867 } 2868 return control; 2869 } 2870 2871 /* 2872 * Must be paired with edp_panel_vdd_off(). 2873 * Must hold pps_mutex around the whole on/off sequence. 2874 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 2875 */ 2876 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2877 { 2878 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2879 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2880 u32 pp; 2881 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2882 bool need_to_disable = !intel_dp->want_panel_vdd; 2883 2884 lockdep_assert_held(&dev_priv->pps_mutex); 2885 2886 if (!intel_dp_is_edp(intel_dp)) 2887 return false; 2888 2889 cancel_delayed_work(&intel_dp->panel_vdd_work); 2890 intel_dp->want_panel_vdd = true; 2891 2892 if (edp_have_panel_vdd(intel_dp)) 2893 return need_to_disable; 2894 2895 intel_display_power_get(dev_priv, 2896 intel_aux_power_domain(intel_dig_port)); 2897 2898 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 2899 intel_dig_port->base.base.base.id, 2900 intel_dig_port->base.base.name); 2901 2902 if (!edp_have_panel_power(intel_dp)) 2903 wait_panel_power_cycle(intel_dp); 2904 2905 pp = ilk_get_pp_control(intel_dp); 2906 pp |= EDP_FORCE_VDD; 2907 2908 pp_stat_reg = _pp_stat_reg(intel_dp); 2909 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2910 2911 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2912 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2913 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2914 intel_de_read(dev_priv, pp_stat_reg), 2915 intel_de_read(dev_priv, pp_ctrl_reg)); 2916 /* 2917 * If the panel wasn't on, delay before accessing aux channel 2918 */ 2919 if (!edp_have_panel_power(intel_dp)) { 2920 drm_dbg_kms(&dev_priv->drm, 2921 "[ENCODER:%d:%s] panel power wasn't enabled\n", 2922 intel_dig_port->base.base.base.id, 2923 intel_dig_port->base.base.name); 2924 msleep(intel_dp->panel_power_up_delay); 2925 } 2926 2927 return need_to_disable; 2928 } 2929 2930 /* 2931 * Must be paired with intel_edp_panel_vdd_off() or 2932 * intel_edp_panel_off(). 2933 * Nested calls to these functions are not allowed since 2934 * we drop the lock. Caller must use some higher level 2935 * locking to prevent nested calls from other threads. 2936 */ 2937 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2938 { 2939 intel_wakeref_t wakeref; 2940 bool vdd; 2941 2942 if (!intel_dp_is_edp(intel_dp)) 2943 return; 2944 2945 vdd = false; 2946 with_pps_lock(intel_dp, wakeref) 2947 vdd = edp_panel_vdd_on(intel_dp); 2948 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2949 dp_to_dig_port(intel_dp)->base.base.base.id, 2950 dp_to_dig_port(intel_dp)->base.base.name); 2951 } 2952 2953 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2954 { 2955 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2956 struct intel_digital_port *intel_dig_port = 2957 dp_to_dig_port(intel_dp); 2958 u32 pp; 2959 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2960 2961 lockdep_assert_held(&dev_priv->pps_mutex); 2962 2963 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 2964 2965 if (!edp_have_panel_vdd(intel_dp)) 2966 return; 2967 2968 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 2969 intel_dig_port->base.base.base.id, 2970 intel_dig_port->base.base.name); 2971 2972 pp = ilk_get_pp_control(intel_dp); 2973 pp &= ~EDP_FORCE_VDD; 2974 2975 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2976 pp_stat_reg = _pp_stat_reg(intel_dp); 2977 2978 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2979 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2980 2981 /* Make sure sequencer is idle before allowing subsequent activity */ 2982 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2983 intel_de_read(dev_priv, pp_stat_reg), 2984 intel_de_read(dev_priv, pp_ctrl_reg)); 2985 2986 if ((pp & PANEL_POWER_ON) == 0) 2987 intel_dp->panel_power_off_time = ktime_get_boottime(); 2988 2989 intel_display_power_put_unchecked(dev_priv, 2990 intel_aux_power_domain(intel_dig_port)); 2991 } 2992 2993 static void edp_panel_vdd_work(struct work_struct *__work) 2994 { 2995 struct intel_dp *intel_dp = 2996 container_of(to_delayed_work(__work), 2997 struct intel_dp, panel_vdd_work); 2998 intel_wakeref_t wakeref; 2999 3000 with_pps_lock(intel_dp, wakeref) { 3001 if (!intel_dp->want_panel_vdd) 3002 edp_panel_vdd_off_sync(intel_dp); 3003 } 3004 } 3005 3006 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 3007 { 3008 unsigned long delay; 3009 3010 /* 3011 * Queue the timer to fire a long time from now (relative to the power 3012 * down delay) to keep the panel power up across a sequence of 3013 * operations. 3014 */ 3015 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 3016 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 3017 } 3018 3019 /* 3020 * Must be paired with edp_panel_vdd_on(). 3021 * Must hold pps_mutex around the whole on/off sequence. 3022 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 3023 */ 3024 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 3025 { 3026 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3027 3028 lockdep_assert_held(&dev_priv->pps_mutex); 3029 3030 if (!intel_dp_is_edp(intel_dp)) 3031 return; 3032 3033 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 3034 dp_to_dig_port(intel_dp)->base.base.base.id, 3035 dp_to_dig_port(intel_dp)->base.base.name); 3036 3037 intel_dp->want_panel_vdd = false; 3038 3039 if (sync) 3040 edp_panel_vdd_off_sync(intel_dp); 3041 else 3042 edp_panel_vdd_schedule_off(intel_dp); 3043 } 3044 3045 static void edp_panel_on(struct intel_dp *intel_dp) 3046 { 3047 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3048 u32 pp; 3049 i915_reg_t pp_ctrl_reg; 3050 3051 lockdep_assert_held(&dev_priv->pps_mutex); 3052 3053 if (!intel_dp_is_edp(intel_dp)) 3054 return; 3055 3056 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 3057 dp_to_dig_port(intel_dp)->base.base.base.id, 3058 dp_to_dig_port(intel_dp)->base.base.name); 3059 3060 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 3061 "[ENCODER:%d:%s] panel power already on\n", 3062 dp_to_dig_port(intel_dp)->base.base.base.id, 3063 dp_to_dig_port(intel_dp)->base.base.name)) 3064 return; 3065 3066 wait_panel_power_cycle(intel_dp); 3067 3068 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3069 pp = ilk_get_pp_control(intel_dp); 3070 if (IS_GEN(dev_priv, 5)) { 3071 /* ILK workaround: disable reset around power sequence */ 3072 pp &= ~PANEL_POWER_RESET; 3073 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3074 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3075 } 3076 3077 pp |= PANEL_POWER_ON; 3078 if (!IS_GEN(dev_priv, 5)) 3079 pp |= PANEL_POWER_RESET; 3080 3081 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3082 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3083 3084 wait_panel_on(intel_dp); 3085 intel_dp->last_power_on = jiffies; 3086 3087 if (IS_GEN(dev_priv, 5)) { 3088 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 3089 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3090 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3091 } 3092 } 3093 3094 void intel_edp_panel_on(struct intel_dp *intel_dp) 3095 { 3096 intel_wakeref_t wakeref; 3097 3098 if (!intel_dp_is_edp(intel_dp)) 3099 return; 3100 3101 with_pps_lock(intel_dp, wakeref) 3102 edp_panel_on(intel_dp); 3103 } 3104 3105 3106 static void edp_panel_off(struct intel_dp *intel_dp) 3107 { 3108 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3109 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3110 u32 pp; 3111 i915_reg_t pp_ctrl_reg; 3112 3113 lockdep_assert_held(&dev_priv->pps_mutex); 3114 3115 if (!intel_dp_is_edp(intel_dp)) 3116 return; 3117 3118 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 3119 dig_port->base.base.base.id, dig_port->base.base.name); 3120 3121 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 3122 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 3123 dig_port->base.base.base.id, dig_port->base.base.name); 3124 3125 pp = ilk_get_pp_control(intel_dp); 3126 /* We need to switch off panel power _and_ force vdd, for otherwise some 3127 * panels get very unhappy and cease to work. */ 3128 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 3129 EDP_BLC_ENABLE); 3130 3131 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3132 3133 intel_dp->want_panel_vdd = false; 3134 3135 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3136 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3137 3138 wait_panel_off(intel_dp); 3139 intel_dp->panel_power_off_time = ktime_get_boottime(); 3140 3141 /* We got a reference when we enabled the VDD. */ 3142 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3143 } 3144 3145 void intel_edp_panel_off(struct intel_dp *intel_dp) 3146 { 3147 intel_wakeref_t wakeref; 3148 3149 if (!intel_dp_is_edp(intel_dp)) 3150 return; 3151 3152 with_pps_lock(intel_dp, wakeref) 3153 edp_panel_off(intel_dp); 3154 } 3155 3156 /* Enable backlight in the panel power control. */ 3157 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 3158 { 3159 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3160 intel_wakeref_t wakeref; 3161 3162 /* 3163 * If we enable the backlight right away following a panel power 3164 * on, we may see slight flicker as the panel syncs with the eDP 3165 * link. So delay a bit to make sure the image is solid before 3166 * allowing it to appear. 3167 */ 3168 wait_backlight_on(intel_dp); 3169 3170 with_pps_lock(intel_dp, wakeref) { 3171 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3172 u32 pp; 3173 3174 pp = ilk_get_pp_control(intel_dp); 3175 pp |= EDP_BLC_ENABLE; 3176 3177 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3178 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3179 } 3180 } 3181 3182 /* Enable backlight PWM and backlight PP control. */ 3183 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3184 const struct drm_connector_state *conn_state) 3185 { 3186 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3187 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3188 3189 if (!intel_dp_is_edp(intel_dp)) 3190 return; 3191 3192 drm_dbg_kms(&i915->drm, "\n"); 3193 3194 intel_panel_enable_backlight(crtc_state, conn_state); 3195 _intel_edp_backlight_on(intel_dp); 3196 } 3197 3198 /* Disable backlight in the panel power control. */ 3199 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3200 { 3201 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3202 intel_wakeref_t wakeref; 3203 3204 if (!intel_dp_is_edp(intel_dp)) 3205 return; 3206 3207 with_pps_lock(intel_dp, wakeref) { 3208 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3209 u32 pp; 3210 3211 pp = ilk_get_pp_control(intel_dp); 3212 pp &= ~EDP_BLC_ENABLE; 3213 3214 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3215 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3216 } 3217 3218 intel_dp->last_backlight_off = jiffies; 3219 edp_wait_backlight_off(intel_dp); 3220 } 3221 3222 /* Disable backlight PP control and backlight PWM. */ 3223 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3224 { 3225 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3226 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3227 3228 if (!intel_dp_is_edp(intel_dp)) 3229 return; 3230 3231 drm_dbg_kms(&i915->drm, "\n"); 3232 3233 _intel_edp_backlight_off(intel_dp); 3234 intel_panel_disable_backlight(old_conn_state); 3235 } 3236 3237 /* 3238 * Hook for controlling the panel power control backlight through the bl_power 3239 * sysfs attribute. Take care to handle multiple calls. 3240 */ 3241 static void intel_edp_backlight_power(struct intel_connector *connector, 3242 bool enable) 3243 { 3244 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3245 struct intel_dp *intel_dp = intel_attached_dp(connector); 3246 intel_wakeref_t wakeref; 3247 bool is_enabled; 3248 3249 is_enabled = false; 3250 with_pps_lock(intel_dp, wakeref) 3251 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3252 if (is_enabled == enable) 3253 return; 3254 3255 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", 3256 enable ? "enable" : "disable"); 3257 3258 if (enable) 3259 _intel_edp_backlight_on(intel_dp); 3260 else 3261 _intel_edp_backlight_off(intel_dp); 3262 } 3263 3264 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3265 { 3266 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3267 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3268 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3269 3270 I915_STATE_WARN(cur_state != state, 3271 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3272 dig_port->base.base.base.id, dig_port->base.base.name, 3273 onoff(state), onoff(cur_state)); 3274 } 3275 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3276 3277 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3278 { 3279 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3280 3281 I915_STATE_WARN(cur_state != state, 3282 "eDP PLL state assertion failure (expected %s, current %s)\n", 3283 onoff(state), onoff(cur_state)); 3284 } 3285 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3286 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3287 3288 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3289 const struct intel_crtc_state *pipe_config) 3290 { 3291 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3292 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3293 3294 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3295 assert_dp_port_disabled(intel_dp); 3296 assert_edp_pll_disabled(dev_priv); 3297 3298 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3299 pipe_config->port_clock); 3300 3301 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3302 3303 if (pipe_config->port_clock == 162000) 3304 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3305 else 3306 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3307 3308 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3309 intel_de_posting_read(dev_priv, DP_A); 3310 udelay(500); 3311 3312 /* 3313 * [DevILK] Work around required when enabling DP PLL 3314 * while a pipe is enabled going to FDI: 3315 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3316 * 2. Program DP PLL enable 3317 */ 3318 if (IS_GEN(dev_priv, 5)) 3319 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3320 3321 intel_dp->DP |= DP_PLL_ENABLE; 3322 3323 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3324 intel_de_posting_read(dev_priv, DP_A); 3325 udelay(200); 3326 } 3327 3328 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3329 const struct intel_crtc_state *old_crtc_state) 3330 { 3331 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3332 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3333 3334 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3335 assert_dp_port_disabled(intel_dp); 3336 assert_edp_pll_enabled(dev_priv); 3337 3338 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3339 3340 intel_dp->DP &= ~DP_PLL_ENABLE; 3341 3342 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3343 intel_de_posting_read(dev_priv, DP_A); 3344 udelay(200); 3345 } 3346 3347 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3348 { 3349 /* 3350 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3351 * be capable of signalling downstream hpd with a long pulse. 3352 * Whether or not that means D3 is safe to use is not clear, 3353 * but let's assume so until proven otherwise. 3354 * 3355 * FIXME should really check all downstream ports... 3356 */ 3357 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3358 drm_dp_is_branch(intel_dp->dpcd) && 3359 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3360 } 3361 3362 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3363 const struct intel_crtc_state *crtc_state, 3364 bool enable) 3365 { 3366 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3367 int ret; 3368 3369 if (!crtc_state->dsc.compression_enable) 3370 return; 3371 3372 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3373 enable ? DP_DECOMPRESSION_EN : 0); 3374 if (ret < 0) 3375 drm_dbg_kms(&i915->drm, 3376 "Failed to %s sink decompression state\n", 3377 enable ? "enable" : "disable"); 3378 } 3379 3380 /* If the sink supports it, try to set the power state appropriately */ 3381 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3382 { 3383 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3384 int ret, i; 3385 3386 /* Should have a valid DPCD by this point */ 3387 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3388 return; 3389 3390 if (mode != DRM_MODE_DPMS_ON) { 3391 if (downstream_hpd_needs_d0(intel_dp)) 3392 return; 3393 3394 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3395 DP_SET_POWER_D3); 3396 } else { 3397 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3398 3399 /* 3400 * When turning on, we need to retry for 1ms to give the sink 3401 * time to wake up. 3402 */ 3403 for (i = 0; i < 3; i++) { 3404 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3405 DP_SET_POWER_D0); 3406 if (ret == 1) 3407 break; 3408 msleep(1); 3409 } 3410 3411 if (ret == 1 && lspcon->active) 3412 lspcon_wait_pcon_mode(lspcon); 3413 } 3414 3415 if (ret != 1) 3416 drm_dbg_kms(&i915->drm, "failed to %s sink power state\n", 3417 mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); 3418 } 3419 3420 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3421 enum port port, enum pipe *pipe) 3422 { 3423 enum pipe p; 3424 3425 for_each_pipe(dev_priv, p) { 3426 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3427 3428 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3429 *pipe = p; 3430 return true; 3431 } 3432 } 3433 3434 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3435 port_name(port)); 3436 3437 /* must initialize pipe to something for the asserts */ 3438 *pipe = PIPE_A; 3439 3440 return false; 3441 } 3442 3443 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3444 i915_reg_t dp_reg, enum port port, 3445 enum pipe *pipe) 3446 { 3447 bool ret; 3448 u32 val; 3449 3450 val = intel_de_read(dev_priv, dp_reg); 3451 3452 ret = val & DP_PORT_EN; 3453 3454 /* asserts want to know the pipe even if the port is disabled */ 3455 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3456 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3457 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3458 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3459 else if (IS_CHERRYVIEW(dev_priv)) 3460 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3461 else 3462 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3463 3464 return ret; 3465 } 3466 3467 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3468 enum pipe *pipe) 3469 { 3470 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3471 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3472 intel_wakeref_t wakeref; 3473 bool ret; 3474 3475 wakeref = intel_display_power_get_if_enabled(dev_priv, 3476 encoder->power_domain); 3477 if (!wakeref) 3478 return false; 3479 3480 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3481 encoder->port, pipe); 3482 3483 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3484 3485 return ret; 3486 } 3487 3488 static void intel_dp_get_config(struct intel_encoder *encoder, 3489 struct intel_crtc_state *pipe_config) 3490 { 3491 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3492 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3493 u32 tmp, flags = 0; 3494 enum port port = encoder->port; 3495 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3496 3497 if (encoder->type == INTEL_OUTPUT_EDP) 3498 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3499 else 3500 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3501 3502 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3503 3504 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3505 3506 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3507 u32 trans_dp = intel_de_read(dev_priv, 3508 TRANS_DP_CTL(crtc->pipe)); 3509 3510 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3511 flags |= DRM_MODE_FLAG_PHSYNC; 3512 else 3513 flags |= DRM_MODE_FLAG_NHSYNC; 3514 3515 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3516 flags |= DRM_MODE_FLAG_PVSYNC; 3517 else 3518 flags |= DRM_MODE_FLAG_NVSYNC; 3519 } else { 3520 if (tmp & DP_SYNC_HS_HIGH) 3521 flags |= DRM_MODE_FLAG_PHSYNC; 3522 else 3523 flags |= DRM_MODE_FLAG_NHSYNC; 3524 3525 if (tmp & DP_SYNC_VS_HIGH) 3526 flags |= DRM_MODE_FLAG_PVSYNC; 3527 else 3528 flags |= DRM_MODE_FLAG_NVSYNC; 3529 } 3530 3531 pipe_config->hw.adjusted_mode.flags |= flags; 3532 3533 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3534 pipe_config->limited_color_range = true; 3535 3536 pipe_config->lane_count = 3537 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3538 3539 intel_dp_get_m_n(crtc, pipe_config); 3540 3541 if (port == PORT_A) { 3542 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3543 pipe_config->port_clock = 162000; 3544 else 3545 pipe_config->port_clock = 270000; 3546 } 3547 3548 pipe_config->hw.adjusted_mode.crtc_clock = 3549 intel_dotclock_calculate(pipe_config->port_clock, 3550 &pipe_config->dp_m_n); 3551 3552 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3553 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3554 /* 3555 * This is a big fat ugly hack. 3556 * 3557 * Some machines in UEFI boot mode provide us a VBT that has 18 3558 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3559 * unknown we fail to light up. Yet the same BIOS boots up with 3560 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3561 * max, not what it tells us to use. 3562 * 3563 * Note: This will still be broken if the eDP panel is not lit 3564 * up by the BIOS, and thus we can't get the mode at module 3565 * load. 3566 */ 3567 drm_dbg_kms(&dev_priv->drm, 3568 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3569 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3570 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3571 } 3572 } 3573 3574 static void intel_disable_dp(struct intel_atomic_state *state, 3575 struct intel_encoder *encoder, 3576 const struct intel_crtc_state *old_crtc_state, 3577 const struct drm_connector_state *old_conn_state) 3578 { 3579 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3580 3581 intel_dp->link_trained = false; 3582 3583 if (old_crtc_state->has_audio) 3584 intel_audio_codec_disable(encoder, 3585 old_crtc_state, old_conn_state); 3586 3587 /* Make sure the panel is off before trying to change the mode. But also 3588 * ensure that we have vdd while we switch off the panel. */ 3589 intel_edp_panel_vdd_on(intel_dp); 3590 intel_edp_backlight_off(old_conn_state); 3591 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3592 intel_edp_panel_off(intel_dp); 3593 } 3594 3595 static void g4x_disable_dp(struct intel_atomic_state *state, 3596 struct intel_encoder *encoder, 3597 const struct intel_crtc_state *old_crtc_state, 3598 const struct drm_connector_state *old_conn_state) 3599 { 3600 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3601 } 3602 3603 static void vlv_disable_dp(struct intel_atomic_state *state, 3604 struct intel_encoder *encoder, 3605 const struct intel_crtc_state *old_crtc_state, 3606 const struct drm_connector_state *old_conn_state) 3607 { 3608 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3609 } 3610 3611 static void g4x_post_disable_dp(struct intel_atomic_state *state, 3612 struct intel_encoder *encoder, 3613 const struct intel_crtc_state *old_crtc_state, 3614 const struct drm_connector_state *old_conn_state) 3615 { 3616 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3617 enum port port = encoder->port; 3618 3619 /* 3620 * Bspec does not list a specific disable sequence for g4x DP. 3621 * Follow the ilk+ sequence (disable pipe before the port) for 3622 * g4x DP as it does not suffer from underruns like the normal 3623 * g4x modeset sequence (disable pipe after the port). 3624 */ 3625 intel_dp_link_down(encoder, old_crtc_state); 3626 3627 /* Only ilk+ has port A */ 3628 if (port == PORT_A) 3629 ilk_edp_pll_off(intel_dp, old_crtc_state); 3630 } 3631 3632 static void vlv_post_disable_dp(struct intel_atomic_state *state, 3633 struct intel_encoder *encoder, 3634 const struct intel_crtc_state *old_crtc_state, 3635 const struct drm_connector_state *old_conn_state) 3636 { 3637 intel_dp_link_down(encoder, old_crtc_state); 3638 } 3639 3640 static void chv_post_disable_dp(struct intel_atomic_state *state, 3641 struct intel_encoder *encoder, 3642 const struct intel_crtc_state *old_crtc_state, 3643 const struct drm_connector_state *old_conn_state) 3644 { 3645 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3646 3647 intel_dp_link_down(encoder, old_crtc_state); 3648 3649 vlv_dpio_get(dev_priv); 3650 3651 /* Assert data lane reset */ 3652 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3653 3654 vlv_dpio_put(dev_priv); 3655 } 3656 3657 static void 3658 cpt_set_link_train(struct intel_dp *intel_dp, 3659 u8 dp_train_pat) 3660 { 3661 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3662 u32 *DP = &intel_dp->DP; 3663 3664 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3665 3666 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3667 case DP_TRAINING_PATTERN_DISABLE: 3668 *DP |= DP_LINK_TRAIN_OFF_CPT; 3669 break; 3670 case DP_TRAINING_PATTERN_1: 3671 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3672 break; 3673 case DP_TRAINING_PATTERN_2: 3674 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3675 break; 3676 case DP_TRAINING_PATTERN_3: 3677 drm_dbg_kms(&dev_priv->drm, 3678 "TPS3 not supported, using TPS2 instead\n"); 3679 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3680 break; 3681 } 3682 3683 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3684 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3685 } 3686 3687 static void 3688 g4x_set_link_train(struct intel_dp *intel_dp, 3689 u8 dp_train_pat) 3690 { 3691 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3692 u32 *DP = &intel_dp->DP; 3693 3694 *DP &= ~DP_LINK_TRAIN_MASK; 3695 3696 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3697 case DP_TRAINING_PATTERN_DISABLE: 3698 *DP |= DP_LINK_TRAIN_OFF; 3699 break; 3700 case DP_TRAINING_PATTERN_1: 3701 *DP |= DP_LINK_TRAIN_PAT_1; 3702 break; 3703 case DP_TRAINING_PATTERN_2: 3704 *DP |= DP_LINK_TRAIN_PAT_2; 3705 break; 3706 case DP_TRAINING_PATTERN_3: 3707 drm_dbg_kms(&dev_priv->drm, 3708 "TPS3 not supported, using TPS2 instead\n"); 3709 *DP |= DP_LINK_TRAIN_PAT_2; 3710 break; 3711 } 3712 3713 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3714 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3715 } 3716 3717 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3718 const struct intel_crtc_state *old_crtc_state) 3719 { 3720 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3721 3722 /* enable with pattern 1 (as per spec) */ 3723 3724 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3725 3726 /* 3727 * Magic for VLV/CHV. We _must_ first set up the register 3728 * without actually enabling the port, and then do another 3729 * write to enable the port. Otherwise link training will 3730 * fail when the power sequencer is freshly used for this port. 3731 */ 3732 intel_dp->DP |= DP_PORT_EN; 3733 if (old_crtc_state->has_audio) 3734 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3735 3736 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3737 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3738 } 3739 3740 static void intel_enable_dp(struct intel_atomic_state *state, 3741 struct intel_encoder *encoder, 3742 const struct intel_crtc_state *pipe_config, 3743 const struct drm_connector_state *conn_state) 3744 { 3745 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3746 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3747 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3748 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3749 enum pipe pipe = crtc->pipe; 3750 intel_wakeref_t wakeref; 3751 3752 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3753 return; 3754 3755 with_pps_lock(intel_dp, wakeref) { 3756 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3757 vlv_init_panel_power_sequencer(encoder, pipe_config); 3758 3759 intel_dp_enable_port(intel_dp, pipe_config); 3760 3761 edp_panel_vdd_on(intel_dp); 3762 edp_panel_on(intel_dp); 3763 edp_panel_vdd_off(intel_dp, true); 3764 } 3765 3766 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3767 unsigned int lane_mask = 0x0; 3768 3769 if (IS_CHERRYVIEW(dev_priv)) 3770 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3771 3772 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3773 lane_mask); 3774 } 3775 3776 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3777 intel_dp_start_link_train(intel_dp); 3778 intel_dp_stop_link_train(intel_dp); 3779 3780 if (pipe_config->has_audio) { 3781 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3782 pipe_name(pipe)); 3783 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3784 } 3785 } 3786 3787 static void g4x_enable_dp(struct intel_atomic_state *state, 3788 struct intel_encoder *encoder, 3789 const struct intel_crtc_state *pipe_config, 3790 const struct drm_connector_state *conn_state) 3791 { 3792 intel_enable_dp(state, encoder, pipe_config, conn_state); 3793 intel_edp_backlight_on(pipe_config, conn_state); 3794 } 3795 3796 static void vlv_enable_dp(struct intel_atomic_state *state, 3797 struct intel_encoder *encoder, 3798 const struct intel_crtc_state *pipe_config, 3799 const struct drm_connector_state *conn_state) 3800 { 3801 intel_edp_backlight_on(pipe_config, conn_state); 3802 } 3803 3804 static void g4x_pre_enable_dp(struct intel_atomic_state *state, 3805 struct intel_encoder *encoder, 3806 const struct intel_crtc_state *pipe_config, 3807 const struct drm_connector_state *conn_state) 3808 { 3809 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3810 enum port port = encoder->port; 3811 3812 intel_dp_prepare(encoder, pipe_config); 3813 3814 /* Only ilk+ has port A */ 3815 if (port == PORT_A) 3816 ilk_edp_pll_on(intel_dp, pipe_config); 3817 } 3818 3819 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3820 { 3821 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3822 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); 3823 enum pipe pipe = intel_dp->pps_pipe; 3824 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3825 3826 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3827 3828 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 3829 return; 3830 3831 edp_panel_vdd_off_sync(intel_dp); 3832 3833 /* 3834 * VLV seems to get confused when multiple power sequencers 3835 * have the same port selected (even if only one has power/vdd 3836 * enabled). The failure manifests as vlv_wait_port_ready() failing 3837 * CHV on the other hand doesn't seem to mind having the same port 3838 * selected in multiple power sequencers, but let's clear the 3839 * port select always when logically disconnecting a power sequencer 3840 * from a port. 3841 */ 3842 drm_dbg_kms(&dev_priv->drm, 3843 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", 3844 pipe_name(pipe), intel_dig_port->base.base.base.id, 3845 intel_dig_port->base.base.name); 3846 intel_de_write(dev_priv, pp_on_reg, 0); 3847 intel_de_posting_read(dev_priv, pp_on_reg); 3848 3849 intel_dp->pps_pipe = INVALID_PIPE; 3850 } 3851 3852 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 3853 enum pipe pipe) 3854 { 3855 struct intel_encoder *encoder; 3856 3857 lockdep_assert_held(&dev_priv->pps_mutex); 3858 3859 for_each_intel_dp(&dev_priv->drm, encoder) { 3860 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3861 3862 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe, 3863 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", 3864 pipe_name(pipe), encoder->base.base.id, 3865 encoder->base.name); 3866 3867 if (intel_dp->pps_pipe != pipe) 3868 continue; 3869 3870 drm_dbg_kms(&dev_priv->drm, 3871 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", 3872 pipe_name(pipe), encoder->base.base.id, 3873 encoder->base.name); 3874 3875 /* make sure vdd is off before we steal it */ 3876 vlv_detach_power_sequencer(intel_dp); 3877 } 3878 } 3879 3880 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 3881 const struct intel_crtc_state *crtc_state) 3882 { 3883 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3884 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3885 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3886 3887 lockdep_assert_held(&dev_priv->pps_mutex); 3888 3889 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3890 3891 if (intel_dp->pps_pipe != INVALID_PIPE && 3892 intel_dp->pps_pipe != crtc->pipe) { 3893 /* 3894 * If another power sequencer was being used on this 3895 * port previously make sure to turn off vdd there while 3896 * we still have control of it. 3897 */ 3898 vlv_detach_power_sequencer(intel_dp); 3899 } 3900 3901 /* 3902 * We may be stealing the power 3903 * sequencer from another port. 3904 */ 3905 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3906 3907 intel_dp->active_pipe = crtc->pipe; 3908 3909 if (!intel_dp_is_edp(intel_dp)) 3910 return; 3911 3912 /* now it's all ours */ 3913 intel_dp->pps_pipe = crtc->pipe; 3914 3915 drm_dbg_kms(&dev_priv->drm, 3916 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3917 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3918 encoder->base.name); 3919 3920 /* init power sequencer on this pipe and port */ 3921 intel_dp_init_panel_power_sequencer(intel_dp); 3922 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3923 } 3924 3925 static void vlv_pre_enable_dp(struct intel_atomic_state *state, 3926 struct intel_encoder *encoder, 3927 const struct intel_crtc_state *pipe_config, 3928 const struct drm_connector_state *conn_state) 3929 { 3930 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3931 3932 intel_enable_dp(state, encoder, pipe_config, conn_state); 3933 } 3934 3935 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, 3936 struct intel_encoder *encoder, 3937 const struct intel_crtc_state *pipe_config, 3938 const struct drm_connector_state *conn_state) 3939 { 3940 intel_dp_prepare(encoder, pipe_config); 3941 3942 vlv_phy_pre_pll_enable(encoder, pipe_config); 3943 } 3944 3945 static void chv_pre_enable_dp(struct intel_atomic_state *state, 3946 struct intel_encoder *encoder, 3947 const struct intel_crtc_state *pipe_config, 3948 const struct drm_connector_state *conn_state) 3949 { 3950 chv_phy_pre_encoder_enable(encoder, pipe_config); 3951 3952 intel_enable_dp(state, encoder, pipe_config, conn_state); 3953 3954 /* Second common lane will stay alive on its own now */ 3955 chv_phy_release_cl2_override(encoder); 3956 } 3957 3958 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, 3959 struct intel_encoder *encoder, 3960 const struct intel_crtc_state *pipe_config, 3961 const struct drm_connector_state *conn_state) 3962 { 3963 intel_dp_prepare(encoder, pipe_config); 3964 3965 chv_phy_pre_pll_enable(encoder, pipe_config); 3966 } 3967 3968 static void chv_dp_post_pll_disable(struct intel_atomic_state *state, 3969 struct intel_encoder *encoder, 3970 const struct intel_crtc_state *old_crtc_state, 3971 const struct drm_connector_state *old_conn_state) 3972 { 3973 chv_phy_post_pll_disable(encoder, old_crtc_state); 3974 } 3975 3976 /* 3977 * Fetch AUX CH registers 0x202 - 0x207 which contain 3978 * link status information 3979 */ 3980 bool 3981 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3982 { 3983 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3984 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 3985 } 3986 3987 /* These are source-specific values. */ 3988 u8 3989 intel_dp_voltage_max(struct intel_dp *intel_dp) 3990 { 3991 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3992 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3993 enum port port = encoder->port; 3994 3995 if (HAS_DDI(dev_priv)) 3996 return intel_ddi_dp_voltage_max(encoder); 3997 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3998 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 3999 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 4000 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4001 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 4002 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 4003 else 4004 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4005 } 4006 4007 u8 4008 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) 4009 { 4010 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4011 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4012 enum port port = encoder->port; 4013 4014 if (HAS_DDI(dev_priv)) { 4015 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); 4016 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4017 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 4018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4019 return DP_TRAIN_PRE_EMPH_LEVEL_3; 4020 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4021 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4023 return DP_TRAIN_PRE_EMPH_LEVEL_1; 4024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4025 default: 4026 return DP_TRAIN_PRE_EMPH_LEVEL_0; 4027 } 4028 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 4029 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 4030 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4031 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4034 return DP_TRAIN_PRE_EMPH_LEVEL_1; 4035 default: 4036 return DP_TRAIN_PRE_EMPH_LEVEL_0; 4037 } 4038 } else { 4039 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 4040 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4041 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4043 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4045 return DP_TRAIN_PRE_EMPH_LEVEL_1; 4046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4047 default: 4048 return DP_TRAIN_PRE_EMPH_LEVEL_0; 4049 } 4050 } 4051 } 4052 4053 static void vlv_set_signal_levels(struct intel_dp *intel_dp) 4054 { 4055 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4056 unsigned long demph_reg_value, preemph_reg_value, 4057 uniqtranscale_reg_value; 4058 u8 train_set = intel_dp->train_set[0]; 4059 4060 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4061 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4062 preemph_reg_value = 0x0004000; 4063 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4065 demph_reg_value = 0x2B405555; 4066 uniqtranscale_reg_value = 0x552AB83A; 4067 break; 4068 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4069 demph_reg_value = 0x2B404040; 4070 uniqtranscale_reg_value = 0x5548B83A; 4071 break; 4072 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4073 demph_reg_value = 0x2B245555; 4074 uniqtranscale_reg_value = 0x5560B83A; 4075 break; 4076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4077 demph_reg_value = 0x2B405555; 4078 uniqtranscale_reg_value = 0x5598DA3A; 4079 break; 4080 default: 4081 return; 4082 } 4083 break; 4084 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4085 preemph_reg_value = 0x0002000; 4086 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4088 demph_reg_value = 0x2B404040; 4089 uniqtranscale_reg_value = 0x5552B83A; 4090 break; 4091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4092 demph_reg_value = 0x2B404848; 4093 uniqtranscale_reg_value = 0x5580B83A; 4094 break; 4095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4096 demph_reg_value = 0x2B404040; 4097 uniqtranscale_reg_value = 0x55ADDA3A; 4098 break; 4099 default: 4100 return; 4101 } 4102 break; 4103 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4104 preemph_reg_value = 0x0000000; 4105 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4107 demph_reg_value = 0x2B305555; 4108 uniqtranscale_reg_value = 0x5570B83A; 4109 break; 4110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4111 demph_reg_value = 0x2B2B4040; 4112 uniqtranscale_reg_value = 0x55ADDA3A; 4113 break; 4114 default: 4115 return; 4116 } 4117 break; 4118 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4119 preemph_reg_value = 0x0006000; 4120 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4122 demph_reg_value = 0x1B405555; 4123 uniqtranscale_reg_value = 0x55ADDA3A; 4124 break; 4125 default: 4126 return; 4127 } 4128 break; 4129 default: 4130 return; 4131 } 4132 4133 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 4134 uniqtranscale_reg_value, 0); 4135 } 4136 4137 static void chv_set_signal_levels(struct intel_dp *intel_dp) 4138 { 4139 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4140 u32 deemph_reg_value, margin_reg_value; 4141 bool uniq_trans_scale = false; 4142 u8 train_set = intel_dp->train_set[0]; 4143 4144 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4145 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4146 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4148 deemph_reg_value = 128; 4149 margin_reg_value = 52; 4150 break; 4151 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4152 deemph_reg_value = 128; 4153 margin_reg_value = 77; 4154 break; 4155 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4156 deemph_reg_value = 128; 4157 margin_reg_value = 102; 4158 break; 4159 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4160 deemph_reg_value = 128; 4161 margin_reg_value = 154; 4162 uniq_trans_scale = true; 4163 break; 4164 default: 4165 return; 4166 } 4167 break; 4168 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4169 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4171 deemph_reg_value = 85; 4172 margin_reg_value = 78; 4173 break; 4174 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4175 deemph_reg_value = 85; 4176 margin_reg_value = 116; 4177 break; 4178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4179 deemph_reg_value = 85; 4180 margin_reg_value = 154; 4181 break; 4182 default: 4183 return; 4184 } 4185 break; 4186 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4187 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4188 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4189 deemph_reg_value = 64; 4190 margin_reg_value = 104; 4191 break; 4192 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4193 deemph_reg_value = 64; 4194 margin_reg_value = 154; 4195 break; 4196 default: 4197 return; 4198 } 4199 break; 4200 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4201 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4203 deemph_reg_value = 43; 4204 margin_reg_value = 154; 4205 break; 4206 default: 4207 return; 4208 } 4209 break; 4210 default: 4211 return; 4212 } 4213 4214 chv_set_phy_signal_level(encoder, deemph_reg_value, 4215 margin_reg_value, uniq_trans_scale); 4216 } 4217 4218 static u32 g4x_signal_levels(u8 train_set) 4219 { 4220 u32 signal_levels = 0; 4221 4222 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4223 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4224 default: 4225 signal_levels |= DP_VOLTAGE_0_4; 4226 break; 4227 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4228 signal_levels |= DP_VOLTAGE_0_6; 4229 break; 4230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4231 signal_levels |= DP_VOLTAGE_0_8; 4232 break; 4233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4234 signal_levels |= DP_VOLTAGE_1_2; 4235 break; 4236 } 4237 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4238 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4239 default: 4240 signal_levels |= DP_PRE_EMPHASIS_0; 4241 break; 4242 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4243 signal_levels |= DP_PRE_EMPHASIS_3_5; 4244 break; 4245 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4246 signal_levels |= DP_PRE_EMPHASIS_6; 4247 break; 4248 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4249 signal_levels |= DP_PRE_EMPHASIS_9_5; 4250 break; 4251 } 4252 return signal_levels; 4253 } 4254 4255 static void 4256 g4x_set_signal_levels(struct intel_dp *intel_dp) 4257 { 4258 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4259 u8 train_set = intel_dp->train_set[0]; 4260 u32 signal_levels; 4261 4262 signal_levels = g4x_signal_levels(train_set); 4263 4264 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4265 signal_levels); 4266 4267 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); 4268 intel_dp->DP |= signal_levels; 4269 4270 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4271 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4272 } 4273 4274 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4275 static u32 snb_cpu_edp_signal_levels(u8 train_set) 4276 { 4277 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4278 DP_TRAIN_PRE_EMPHASIS_MASK); 4279 4280 switch (signal_levels) { 4281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4283 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4285 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4288 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4291 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4294 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4295 default: 4296 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4297 "0x%x\n", signal_levels); 4298 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4299 } 4300 } 4301 4302 static void 4303 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4304 { 4305 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4306 u8 train_set = intel_dp->train_set[0]; 4307 u32 signal_levels; 4308 4309 signal_levels = snb_cpu_edp_signal_levels(train_set); 4310 4311 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4312 signal_levels); 4313 4314 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4315 intel_dp->DP |= signal_levels; 4316 4317 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4318 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4319 } 4320 4321 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4322 static u32 ivb_cpu_edp_signal_levels(u8 train_set) 4323 { 4324 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4325 DP_TRAIN_PRE_EMPHASIS_MASK); 4326 4327 switch (signal_levels) { 4328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4329 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4331 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4333 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4334 4335 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4336 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4338 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4339 4340 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4341 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4343 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4344 4345 default: 4346 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4347 "0x%x\n", signal_levels); 4348 return EDP_LINK_TRAIN_500MV_0DB_IVB; 4349 } 4350 } 4351 4352 static void 4353 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4354 { 4355 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4356 u8 train_set = intel_dp->train_set[0]; 4357 u32 signal_levels; 4358 4359 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4360 4361 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4362 signal_levels); 4363 4364 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4365 intel_dp->DP |= signal_levels; 4366 4367 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4368 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4369 } 4370 4371 void intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4372 { 4373 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4374 u8 train_set = intel_dp->train_set[0]; 4375 4376 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4377 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4378 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4379 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4380 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4381 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4382 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 4383 " (max)" : ""); 4384 4385 intel_dp->set_signal_levels(intel_dp); 4386 } 4387 4388 void 4389 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4390 u8 dp_train_pat) 4391 { 4392 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4393 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 4394 4395 if (dp_train_pat & train_pat_mask) 4396 drm_dbg_kms(&dev_priv->drm, 4397 "Using DP training pattern TPS%d\n", 4398 dp_train_pat & train_pat_mask); 4399 4400 intel_dp->set_link_train(intel_dp, dp_train_pat); 4401 } 4402 4403 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4404 { 4405 if (intel_dp->set_idle_link_train) 4406 intel_dp->set_idle_link_train(intel_dp); 4407 } 4408 4409 static void 4410 intel_dp_link_down(struct intel_encoder *encoder, 4411 const struct intel_crtc_state *old_crtc_state) 4412 { 4413 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4414 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4415 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4416 enum port port = encoder->port; 4417 u32 DP = intel_dp->DP; 4418 4419 if (drm_WARN_ON(&dev_priv->drm, 4420 (intel_de_read(dev_priv, intel_dp->output_reg) & 4421 DP_PORT_EN) == 0)) 4422 return; 4423 4424 drm_dbg_kms(&dev_priv->drm, "\n"); 4425 4426 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4427 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4428 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4429 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4430 } else { 4431 DP &= ~DP_LINK_TRAIN_MASK; 4432 DP |= DP_LINK_TRAIN_PAT_IDLE; 4433 } 4434 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4435 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4436 4437 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4438 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4439 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4440 4441 /* 4442 * HW workaround for IBX, we need to move the port 4443 * to transcoder A after disabling it to allow the 4444 * matching HDMI port to be enabled on transcoder A. 4445 */ 4446 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4447 /* 4448 * We get CPU/PCH FIFO underruns on the other pipe when 4449 * doing the workaround. Sweep them under the rug. 4450 */ 4451 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4452 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4453 4454 /* always enable with pattern 1 (as per spec) */ 4455 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 4456 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 4457 DP_LINK_TRAIN_PAT_1; 4458 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4459 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4460 4461 DP &= ~DP_PORT_EN; 4462 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4463 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4464 4465 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 4466 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4467 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4468 } 4469 4470 msleep(intel_dp->panel_power_down_delay); 4471 4472 intel_dp->DP = DP; 4473 4474 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4475 intel_wakeref_t wakeref; 4476 4477 with_pps_lock(intel_dp, wakeref) 4478 intel_dp->active_pipe = INVALID_PIPE; 4479 } 4480 } 4481 4482 static void 4483 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) 4484 { 4485 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4486 u8 dpcd_ext[6]; 4487 4488 /* 4489 * Prior to DP1.3 the bit represented by 4490 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. 4491 * if it is set DP_DPCD_REV at 0000h could be at a value less than 4492 * the true capability of the panel. The only way to check is to 4493 * then compare 0000h and 2200h. 4494 */ 4495 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & 4496 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) 4497 return; 4498 4499 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, 4500 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { 4501 drm_err(&i915->drm, 4502 "DPCD failed read at extended capabilities\n"); 4503 return; 4504 } 4505 4506 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { 4507 drm_dbg_kms(&i915->drm, 4508 "DPCD extended DPCD rev less than base DPCD rev\n"); 4509 return; 4510 } 4511 4512 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) 4513 return; 4514 4515 drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n", 4516 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); 4517 4518 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); 4519 } 4520 4521 bool 4522 intel_dp_read_dpcd(struct intel_dp *intel_dp) 4523 { 4524 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4525 4526 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 4527 sizeof(intel_dp->dpcd)) < 0) 4528 return false; /* aux transfer failed */ 4529 4530 intel_dp_extended_receiver_capabilities(intel_dp); 4531 4532 drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd), 4533 intel_dp->dpcd); 4534 4535 return intel_dp->dpcd[DP_DPCD_REV] != 0; 4536 } 4537 4538 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4539 { 4540 u8 dprx = 0; 4541 4542 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4543 &dprx) != 1) 4544 return false; 4545 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4546 } 4547 4548 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 4549 { 4550 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4551 4552 /* 4553 * Clear the cached register set to avoid using stale values 4554 * for the sinks that do not support DSC. 4555 */ 4556 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4557 4558 /* Clear fec_capable to avoid using stale values */ 4559 intel_dp->fec_capable = 0; 4560 4561 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4562 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4563 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4564 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4565 intel_dp->dsc_dpcd, 4566 sizeof(intel_dp->dsc_dpcd)) < 0) 4567 drm_err(&i915->drm, 4568 "Failed to read DPCD register 0x%x\n", 4569 DP_DSC_SUPPORT); 4570 4571 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 4572 (int)sizeof(intel_dp->dsc_dpcd), 4573 intel_dp->dsc_dpcd); 4574 4575 /* FEC is supported only on DP 1.4 */ 4576 if (!intel_dp_is_edp(intel_dp) && 4577 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4578 &intel_dp->fec_capable) < 0) 4579 drm_err(&i915->drm, 4580 "Failed to read FEC DPCD register\n"); 4581 4582 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 4583 intel_dp->fec_capable); 4584 } 4585 } 4586 4587 static bool 4588 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4589 { 4590 struct drm_i915_private *dev_priv = 4591 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4592 4593 /* this function is meant to be called only once */ 4594 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4595 4596 if (!intel_dp_read_dpcd(intel_dp)) 4597 return false; 4598 4599 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4600 drm_dp_is_branch(intel_dp->dpcd)); 4601 4602 /* 4603 * Read the eDP display control registers. 4604 * 4605 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4606 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4607 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4608 * method). The display control registers should read zero if they're 4609 * not supported anyway. 4610 */ 4611 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4612 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4613 sizeof(intel_dp->edp_dpcd)) 4614 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 4615 (int)sizeof(intel_dp->edp_dpcd), 4616 intel_dp->edp_dpcd); 4617 4618 /* 4619 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4620 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4621 */ 4622 intel_psr_init_dpcd(intel_dp); 4623 4624 /* Read the eDP 1.4+ supported link rates. */ 4625 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4626 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4627 int i; 4628 4629 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4630 sink_rates, sizeof(sink_rates)); 4631 4632 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4633 int val = le16_to_cpu(sink_rates[i]); 4634 4635 if (val == 0) 4636 break; 4637 4638 /* Value read multiplied by 200kHz gives the per-lane 4639 * link rate in kHz. The source rates are, however, 4640 * stored in terms of LS_Clk kHz. The full conversion 4641 * back to symbols is 4642 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4643 */ 4644 intel_dp->sink_rates[i] = (val * 200) / 10; 4645 } 4646 intel_dp->num_sink_rates = i; 4647 } 4648 4649 /* 4650 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4651 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 4652 */ 4653 if (intel_dp->num_sink_rates) 4654 intel_dp->use_rate_select = true; 4655 else 4656 intel_dp_set_sink_rates(intel_dp); 4657 4658 intel_dp_set_common_rates(intel_dp); 4659 4660 /* Read the eDP DSC DPCD registers */ 4661 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4662 intel_dp_get_dsc_sink_cap(intel_dp); 4663 4664 return true; 4665 } 4666 4667 4668 static bool 4669 intel_dp_get_dpcd(struct intel_dp *intel_dp) 4670 { 4671 if (!intel_dp_read_dpcd(intel_dp)) 4672 return false; 4673 4674 /* 4675 * Don't clobber cached eDP rates. Also skip re-reading 4676 * the OUI/ID since we know it won't change. 4677 */ 4678 if (!intel_dp_is_edp(intel_dp)) { 4679 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4680 drm_dp_is_branch(intel_dp->dpcd)); 4681 4682 intel_dp_set_sink_rates(intel_dp); 4683 intel_dp_set_common_rates(intel_dp); 4684 } 4685 4686 /* 4687 * Some eDP panels do not set a valid value for sink count, that is why 4688 * it don't care about read it here and in intel_edp_init_dpcd(). 4689 */ 4690 if (!intel_dp_is_edp(intel_dp) && 4691 !drm_dp_has_quirk(&intel_dp->desc, 0, 4692 DP_DPCD_QUIRK_NO_SINK_COUNT)) { 4693 u8 count; 4694 ssize_t r; 4695 4696 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); 4697 if (r < 1) 4698 return false; 4699 4700 /* 4701 * Sink count can change between short pulse hpd hence 4702 * a member variable in intel_dp will track any changes 4703 * between short pulse interrupts. 4704 */ 4705 intel_dp->sink_count = DP_GET_SINK_COUNT(count); 4706 4707 /* 4708 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4709 * a dongle is present but no display. Unless we require to know 4710 * if a dongle is present or not, we don't need to update 4711 * downstream port information. So, an early return here saves 4712 * time from performing other operations which are not required. 4713 */ 4714 if (!intel_dp->sink_count) 4715 return false; 4716 } 4717 4718 if (!drm_dp_is_branch(intel_dp->dpcd)) 4719 return true; /* native DP sink */ 4720 4721 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 4722 return true; /* no per-port downstream info */ 4723 4724 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, 4725 intel_dp->downstream_ports, 4726 DP_MAX_DOWNSTREAM_PORTS) < 0) 4727 return false; /* downstream port status fetch failed */ 4728 4729 return true; 4730 } 4731 4732 static bool 4733 intel_dp_sink_can_mst(struct intel_dp *intel_dp) 4734 { 4735 u8 mstm_cap; 4736 4737 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 4738 return false; 4739 4740 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) 4741 return false; 4742 4743 return mstm_cap & DP_MST_CAP; 4744 } 4745 4746 static bool 4747 intel_dp_can_mst(struct intel_dp *intel_dp) 4748 { 4749 return i915_modparams.enable_dp_mst && 4750 intel_dp->can_mst && 4751 intel_dp_sink_can_mst(intel_dp); 4752 } 4753 4754 static void 4755 intel_dp_configure_mst(struct intel_dp *intel_dp) 4756 { 4757 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4758 struct intel_encoder *encoder = 4759 &dp_to_dig_port(intel_dp)->base; 4760 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); 4761 4762 drm_dbg_kms(&i915->drm, 4763 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 4764 encoder->base.base.id, encoder->base.name, 4765 yesno(intel_dp->can_mst), yesno(sink_can_mst), 4766 yesno(i915_modparams.enable_dp_mst)); 4767 4768 if (!intel_dp->can_mst) 4769 return; 4770 4771 intel_dp->is_mst = sink_can_mst && 4772 i915_modparams.enable_dp_mst; 4773 4774 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4775 intel_dp->is_mst); 4776 } 4777 4778 static bool 4779 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4780 { 4781 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4782 sink_irq_vector, DP_DPRX_ESI_LEN) == 4783 DP_DPRX_ESI_LEN; 4784 } 4785 4786 bool 4787 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 4788 const struct drm_connector_state *conn_state) 4789 { 4790 /* 4791 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 4792 * of Color Encoding Format and Content Color Gamut], in order to 4793 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 4794 */ 4795 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4796 return true; 4797 4798 switch (conn_state->colorspace) { 4799 case DRM_MODE_COLORIMETRY_SYCC_601: 4800 case DRM_MODE_COLORIMETRY_OPYCC_601: 4801 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4802 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4803 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4804 return true; 4805 default: 4806 break; 4807 } 4808 4809 return false; 4810 } 4811 4812 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 4813 struct dp_sdp *sdp, size_t size) 4814 { 4815 size_t length = sizeof(struct dp_sdp); 4816 4817 if (size < length) 4818 return -ENOSPC; 4819 4820 memset(sdp, 0, size); 4821 4822 /* 4823 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 4824 * VSC SDP Header Bytes 4825 */ 4826 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 4827 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 4828 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 4829 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 4830 4831 /* 4832 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 4833 * per DP 1.4a spec. 4834 */ 4835 if (vsc->revision != 0x5) 4836 goto out; 4837 4838 /* VSC SDP Payload for DB16 through DB18 */ 4839 /* Pixel Encoding and Colorimetry Formats */ 4840 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 4841 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 4842 4843 switch (vsc->bpc) { 4844 case 6: 4845 /* 6bpc: 0x0 */ 4846 break; 4847 case 8: 4848 sdp->db[17] = 0x1; /* DB17[3:0] */ 4849 break; 4850 case 10: 4851 sdp->db[17] = 0x2; 4852 break; 4853 case 12: 4854 sdp->db[17] = 0x3; 4855 break; 4856 case 16: 4857 sdp->db[17] = 0x4; 4858 break; 4859 default: 4860 MISSING_CASE(vsc->bpc); 4861 break; 4862 } 4863 /* Dynamic Range and Component Bit Depth */ 4864 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 4865 sdp->db[17] |= 0x80; /* DB17[7] */ 4866 4867 /* Content Type */ 4868 sdp->db[18] = vsc->content_type & 0x7; 4869 4870 out: 4871 return length; 4872 } 4873 4874 static ssize_t 4875 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 4876 struct dp_sdp *sdp, 4877 size_t size) 4878 { 4879 size_t length = sizeof(struct dp_sdp); 4880 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4881 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4882 ssize_t len; 4883 4884 if (size < length) 4885 return -ENOSPC; 4886 4887 memset(sdp, 0, size); 4888 4889 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 4890 if (len < 0) { 4891 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 4892 return -ENOSPC; 4893 } 4894 4895 if (len != infoframe_size) { 4896 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 4897 return -ENOSPC; 4898 } 4899 4900 /* 4901 * Set up the infoframe sdp packet for HDR static metadata. 4902 * Prepare VSC Header for SU as per DP 1.4a spec, 4903 * Table 2-100 and Table 2-101 4904 */ 4905 4906 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 4907 sdp->sdp_header.HB0 = 0; 4908 /* 4909 * Packet Type 80h + Non-audio INFOFRAME Type value 4910 * HDMI_INFOFRAME_TYPE_DRM: 0x87 4911 * - 80h + Non-audio INFOFRAME Type value 4912 * - InfoFrame Type: 0x07 4913 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 4914 */ 4915 sdp->sdp_header.HB1 = drm_infoframe->type; 4916 /* 4917 * Least Significant Eight Bits of (Data Byte Count – 1) 4918 * infoframe_size - 1 4919 */ 4920 sdp->sdp_header.HB2 = 0x1D; 4921 /* INFOFRAME SDP Version Number */ 4922 sdp->sdp_header.HB3 = (0x13 << 2); 4923 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4924 sdp->db[0] = drm_infoframe->version; 4925 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4926 sdp->db[1] = drm_infoframe->length; 4927 /* 4928 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4929 * HDMI_INFOFRAME_HEADER_SIZE 4930 */ 4931 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4932 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4933 HDMI_DRM_INFOFRAME_SIZE); 4934 4935 /* 4936 * Size of DP infoframe sdp packet for HDR static metadata consists of 4937 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4938 * - Two Data Blocks: 2 bytes 4939 * CTA Header Byte2 (INFOFRAME Version Number) 4940 * CTA Header Byte3 (Length of INFOFRAME) 4941 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4942 * 4943 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 4944 * infoframe size. But GEN11+ has larger than that size, write_infoframe 4945 * will pad rest of the size. 4946 */ 4947 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 4948 } 4949 4950 static void intel_write_dp_sdp(struct intel_encoder *encoder, 4951 const struct intel_crtc_state *crtc_state, 4952 unsigned int type) 4953 { 4954 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4955 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4956 struct dp_sdp sdp = {}; 4957 ssize_t len; 4958 4959 if ((crtc_state->infoframes.enable & 4960 intel_hdmi_infoframe_enable(type)) == 0) 4961 return; 4962 4963 switch (type) { 4964 case DP_SDP_VSC: 4965 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 4966 sizeof(sdp)); 4967 break; 4968 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4969 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 4970 &sdp, sizeof(sdp)); 4971 break; 4972 default: 4973 MISSING_CASE(type); 4974 return; 4975 } 4976 4977 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4978 return; 4979 4980 intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 4981 } 4982 4983 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 4984 const struct intel_crtc_state *crtc_state, 4985 struct drm_dp_vsc_sdp *vsc) 4986 { 4987 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4988 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4989 struct dp_sdp sdp = {}; 4990 ssize_t len; 4991 4992 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 4993 4994 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4995 return; 4996 4997 intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 4998 &sdp, len); 4999 } 5000 5001 void intel_dp_set_infoframes(struct intel_encoder *encoder, 5002 bool enable, 5003 const struct intel_crtc_state *crtc_state, 5004 const struct drm_connector_state *conn_state) 5005 { 5006 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5007 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5008 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 5009 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 5010 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 5011 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 5012 u32 val = intel_de_read(dev_priv, reg); 5013 5014 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 5015 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 5016 if (intel_psr_enabled(intel_dp)) 5017 val &= ~dip_enable; 5018 else 5019 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 5020 5021 if (!enable) { 5022 intel_de_write(dev_priv, reg, val); 5023 intel_de_posting_read(dev_priv, reg); 5024 return; 5025 } 5026 5027 intel_de_write(dev_priv, reg, val); 5028 intel_de_posting_read(dev_priv, reg); 5029 5030 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 5031 if (!intel_psr_enabled(intel_dp)) 5032 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 5033 5034 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 5035 } 5036 5037 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 5038 const void *buffer, size_t size) 5039 { 5040 const struct dp_sdp *sdp = buffer; 5041 5042 if (size < sizeof(struct dp_sdp)) 5043 return -EINVAL; 5044 5045 memset(vsc, 0, size); 5046 5047 if (sdp->sdp_header.HB0 != 0) 5048 return -EINVAL; 5049 5050 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 5051 return -EINVAL; 5052 5053 vsc->sdp_type = sdp->sdp_header.HB1; 5054 vsc->revision = sdp->sdp_header.HB2; 5055 vsc->length = sdp->sdp_header.HB3; 5056 5057 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 5058 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 5059 /* 5060 * - HB2 = 0x2, HB3 = 0x8 5061 * VSC SDP supporting 3D stereo + PSR 5062 * - HB2 = 0x4, HB3 = 0xe 5063 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 5064 * first scan line of the SU region (applies to eDP v1.4b 5065 * and higher). 5066 */ 5067 return 0; 5068 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 5069 /* 5070 * - HB2 = 0x5, HB3 = 0x13 5071 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 5072 * Format. 5073 */ 5074 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 5075 vsc->colorimetry = sdp->db[16] & 0xf; 5076 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 5077 5078 switch (sdp->db[17] & 0x7) { 5079 case 0x0: 5080 vsc->bpc = 6; 5081 break; 5082 case 0x1: 5083 vsc->bpc = 8; 5084 break; 5085 case 0x2: 5086 vsc->bpc = 10; 5087 break; 5088 case 0x3: 5089 vsc->bpc = 12; 5090 break; 5091 case 0x4: 5092 vsc->bpc = 16; 5093 break; 5094 default: 5095 MISSING_CASE(sdp->db[17] & 0x7); 5096 return -EINVAL; 5097 } 5098 5099 vsc->content_type = sdp->db[18] & 0x7; 5100 } else { 5101 return -EINVAL; 5102 } 5103 5104 return 0; 5105 } 5106 5107 static int 5108 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 5109 const void *buffer, size_t size) 5110 { 5111 int ret; 5112 5113 const struct dp_sdp *sdp = buffer; 5114 5115 if (size < sizeof(struct dp_sdp)) 5116 return -EINVAL; 5117 5118 if (sdp->sdp_header.HB0 != 0) 5119 return -EINVAL; 5120 5121 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 5122 return -EINVAL; 5123 5124 /* 5125 * Least Significant Eight Bits of (Data Byte Count – 1) 5126 * 1Dh (i.e., Data Byte Count = 30 bytes). 5127 */ 5128 if (sdp->sdp_header.HB2 != 0x1D) 5129 return -EINVAL; 5130 5131 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */ 5132 if ((sdp->sdp_header.HB3 & 0x3) != 0) 5133 return -EINVAL; 5134 5135 /* INFOFRAME SDP Version Number */ 5136 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 5137 return -EINVAL; 5138 5139 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 5140 if (sdp->db[0] != 1) 5141 return -EINVAL; 5142 5143 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 5144 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 5145 return -EINVAL; 5146 5147 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 5148 HDMI_DRM_INFOFRAME_SIZE); 5149 5150 return ret; 5151 } 5152 5153 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 5154 struct intel_crtc_state *crtc_state, 5155 struct drm_dp_vsc_sdp *vsc) 5156 { 5157 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 5158 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5159 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5160 unsigned int type = DP_SDP_VSC; 5161 struct dp_sdp sdp = {}; 5162 int ret; 5163 5164 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 5165 if (intel_psr_enabled(intel_dp)) 5166 return; 5167 5168 if ((crtc_state->infoframes.enable & 5169 intel_hdmi_infoframe_enable(type)) == 0) 5170 return; 5171 5172 intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 5173 5174 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 5175 5176 if (ret) 5177 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 5178 } 5179 5180 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 5181 struct intel_crtc_state *crtc_state, 5182 struct hdmi_drm_infoframe *drm_infoframe) 5183 { 5184 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 5185 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5186 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 5187 struct dp_sdp sdp = {}; 5188 int ret; 5189 5190 if ((crtc_state->infoframes.enable & 5191 intel_hdmi_infoframe_enable(type)) == 0) 5192 return; 5193 5194 intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 5195 sizeof(sdp)); 5196 5197 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 5198 sizeof(sdp)); 5199 5200 if (ret) 5201 drm_dbg_kms(&dev_priv->drm, 5202 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 5203 } 5204 5205 void intel_read_dp_sdp(struct intel_encoder *encoder, 5206 struct intel_crtc_state *crtc_state, 5207 unsigned int type) 5208 { 5209 if (encoder->type != INTEL_OUTPUT_DDI) 5210 return; 5211 5212 switch (type) { 5213 case DP_SDP_VSC: 5214 intel_read_dp_vsc_sdp(encoder, crtc_state, 5215 &crtc_state->infoframes.vsc); 5216 break; 5217 case HDMI_PACKET_TYPE_GAMUT_METADATA: 5218 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 5219 &crtc_state->infoframes.drm.drm); 5220 break; 5221 default: 5222 MISSING_CASE(type); 5223 break; 5224 } 5225 } 5226 5227 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 5228 { 5229 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5230 int status = 0; 5231 int test_link_rate; 5232 u8 test_lane_count, test_link_bw; 5233 /* (DP CTS 1.2) 5234 * 4.3.1.11 5235 */ 5236 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 5237 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 5238 &test_lane_count); 5239 5240 if (status <= 0) { 5241 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 5242 return DP_TEST_NAK; 5243 } 5244 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 5245 5246 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 5247 &test_link_bw); 5248 if (status <= 0) { 5249 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5250 return DP_TEST_NAK; 5251 } 5252 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5253 5254 /* Validate the requested link rate and lane count */ 5255 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5256 test_lane_count)) 5257 return DP_TEST_NAK; 5258 5259 intel_dp->compliance.test_lane_count = test_lane_count; 5260 intel_dp->compliance.test_link_rate = test_link_rate; 5261 5262 return DP_TEST_ACK; 5263 } 5264 5265 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5266 { 5267 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5268 u8 test_pattern; 5269 u8 test_misc; 5270 __be16 h_width, v_height; 5271 int status = 0; 5272 5273 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5274 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5275 &test_pattern); 5276 if (status <= 0) { 5277 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5278 return DP_TEST_NAK; 5279 } 5280 if (test_pattern != DP_COLOR_RAMP) 5281 return DP_TEST_NAK; 5282 5283 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5284 &h_width, 2); 5285 if (status <= 0) { 5286 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5287 return DP_TEST_NAK; 5288 } 5289 5290 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5291 &v_height, 2); 5292 if (status <= 0) { 5293 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5294 return DP_TEST_NAK; 5295 } 5296 5297 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5298 &test_misc); 5299 if (status <= 0) { 5300 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5301 return DP_TEST_NAK; 5302 } 5303 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5304 return DP_TEST_NAK; 5305 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5306 return DP_TEST_NAK; 5307 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5308 case DP_TEST_BIT_DEPTH_6: 5309 intel_dp->compliance.test_data.bpc = 6; 5310 break; 5311 case DP_TEST_BIT_DEPTH_8: 5312 intel_dp->compliance.test_data.bpc = 8; 5313 break; 5314 default: 5315 return DP_TEST_NAK; 5316 } 5317 5318 intel_dp->compliance.test_data.video_pattern = test_pattern; 5319 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5320 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5321 /* Set test active flag here so userspace doesn't interrupt things */ 5322 intel_dp->compliance.test_active = true; 5323 5324 return DP_TEST_ACK; 5325 } 5326 5327 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5328 { 5329 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5330 u8 test_result = DP_TEST_ACK; 5331 struct intel_connector *intel_connector = intel_dp->attached_connector; 5332 struct drm_connector *connector = &intel_connector->base; 5333 5334 if (intel_connector->detect_edid == NULL || 5335 connector->edid_corrupt || 5336 intel_dp->aux.i2c_defer_count > 6) { 5337 /* Check EDID read for NACKs, DEFERs and corruption 5338 * (DP CTS 1.2 Core r1.1) 5339 * 4.2.2.4 : Failed EDID read, I2C_NAK 5340 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5341 * 4.2.2.6 : EDID corruption detected 5342 * Use failsafe mode for all cases 5343 */ 5344 if (intel_dp->aux.i2c_nack_count > 0 || 5345 intel_dp->aux.i2c_defer_count > 0) 5346 drm_dbg_kms(&i915->drm, 5347 "EDID read had %d NACKs, %d DEFERs\n", 5348 intel_dp->aux.i2c_nack_count, 5349 intel_dp->aux.i2c_defer_count); 5350 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 5351 } else { 5352 struct edid *block = intel_connector->detect_edid; 5353 5354 /* We have to write the checksum 5355 * of the last block read 5356 */ 5357 block += intel_connector->detect_edid->extensions; 5358 5359 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 5360 block->checksum) <= 0) 5361 drm_dbg_kms(&i915->drm, 5362 "Failed to write EDID checksum\n"); 5363 5364 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 5365 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 5366 } 5367 5368 /* Set test active flag here so userspace doesn't interrupt things */ 5369 intel_dp->compliance.test_active = true; 5370 5371 return test_result; 5372 } 5373 5374 static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp) 5375 { 5376 struct drm_dp_phy_test_params *data = 5377 &intel_dp->compliance.test_data.phytest; 5378 5379 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 5380 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 5381 return DP_TEST_NAK; 5382 } 5383 5384 /* 5385 * link_mst is set to false to avoid executing mst related code 5386 * during compliance testing. 5387 */ 5388 intel_dp->link_mst = false; 5389 5390 return DP_TEST_ACK; 5391 } 5392 5393 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) 5394 { 5395 struct drm_i915_private *dev_priv = 5396 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 5397 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5398 struct drm_dp_phy_test_params *data = 5399 &intel_dp->compliance.test_data.phytest; 5400 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5401 enum pipe pipe = crtc->pipe; 5402 u32 pattern_val; 5403 5404 switch (data->phy_pattern) { 5405 case DP_PHY_TEST_PATTERN_NONE: 5406 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 5407 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 5408 break; 5409 case DP_PHY_TEST_PATTERN_D10_2: 5410 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 5411 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5412 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 5413 break; 5414 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 5415 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 5416 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5417 DDI_DP_COMP_CTL_ENABLE | 5418 DDI_DP_COMP_CTL_SCRAMBLED_0); 5419 break; 5420 case DP_PHY_TEST_PATTERN_PRBS7: 5421 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 5422 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5423 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 5424 break; 5425 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 5426 /* 5427 * FIXME: Ideally pattern should come from DPCD 0x250. As 5428 * current firmware of DPR-100 could not set it, so hardcoding 5429 * now for complaince test. 5430 */ 5431 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 5432 pattern_val = 0x3e0f83e0; 5433 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 5434 pattern_val = 0x0f83e0f8; 5435 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 5436 pattern_val = 0x0000f83e; 5437 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 5438 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5439 DDI_DP_COMP_CTL_ENABLE | 5440 DDI_DP_COMP_CTL_CUSTOM80); 5441 break; 5442 case DP_PHY_TEST_PATTERN_CP2520: 5443 /* 5444 * FIXME: Ideally pattern should come from DPCD 0x24A. As 5445 * current firmware of DPR-100 could not set it, so hardcoding 5446 * now for complaince test. 5447 */ 5448 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5449 pattern_val = 0xFB; 5450 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5451 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5452 pattern_val); 5453 break; 5454 default: 5455 WARN(1, "Invalid Phy Test Pattern\n"); 5456 } 5457 } 5458 5459 static void 5460 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) 5461 { 5462 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5463 struct drm_device *dev = intel_dig_port->base.base.dev; 5464 struct drm_i915_private *dev_priv = to_i915(dev); 5465 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5466 enum pipe pipe = crtc->pipe; 5467 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5468 5469 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5470 TRANS_DDI_FUNC_CTL(pipe)); 5471 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5472 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5473 5474 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5475 TGL_TRANS_DDI_PORT_MASK); 5476 trans_conf_value &= ~PIPECONF_ENABLE; 5477 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5478 5479 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5480 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5481 trans_ddi_func_ctl_value); 5482 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5483 } 5484 5485 static void 5486 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) 5487 { 5488 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5489 struct drm_device *dev = intel_dig_port->base.base.dev; 5490 struct drm_i915_private *dev_priv = to_i915(dev); 5491 enum port port = intel_dig_port->base.port; 5492 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 5493 enum pipe pipe = crtc->pipe; 5494 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5495 5496 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5497 TRANS_DDI_FUNC_CTL(pipe)); 5498 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5499 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5500 5501 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5502 TGL_TRANS_DDI_SELECT_PORT(port); 5503 trans_conf_value |= PIPECONF_ENABLE; 5504 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5505 5506 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5507 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5508 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5509 trans_ddi_func_ctl_value); 5510 } 5511 5512 void intel_dp_process_phy_request(struct intel_dp *intel_dp) 5513 { 5514 struct drm_dp_phy_test_params *data = 5515 &intel_dp->compliance.test_data.phytest; 5516 u8 link_status[DP_LINK_STATUS_SIZE]; 5517 5518 if (!intel_dp_get_link_status(intel_dp, link_status)) { 5519 DRM_DEBUG_KMS("failed to get link status\n"); 5520 return; 5521 } 5522 5523 /* retrieve vswing & pre-emphasis setting */ 5524 intel_dp_get_adjust_train(intel_dp, link_status); 5525 5526 intel_dp_autotest_phy_ddi_disable(intel_dp); 5527 5528 intel_dp_set_signal_levels(intel_dp); 5529 5530 intel_dp_phy_pattern_update(intel_dp); 5531 5532 intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); 5533 5534 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5535 link_status[DP_DPCD_REV]); 5536 } 5537 5538 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5539 { 5540 u8 test_result; 5541 5542 test_result = intel_dp_prepare_phytest(intel_dp); 5543 if (test_result != DP_TEST_ACK) 5544 DRM_ERROR("Phy test preparation failed\n"); 5545 5546 intel_dp_process_phy_request(intel_dp); 5547 5548 return test_result; 5549 } 5550 5551 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5552 { 5553 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5554 u8 response = DP_TEST_NAK; 5555 u8 request = 0; 5556 int status; 5557 5558 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5559 if (status <= 0) { 5560 drm_dbg_kms(&i915->drm, 5561 "Could not read test request from sink\n"); 5562 goto update_status; 5563 } 5564 5565 switch (request) { 5566 case DP_TEST_LINK_TRAINING: 5567 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5568 response = intel_dp_autotest_link_training(intel_dp); 5569 break; 5570 case DP_TEST_LINK_VIDEO_PATTERN: 5571 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5572 response = intel_dp_autotest_video_pattern(intel_dp); 5573 break; 5574 case DP_TEST_LINK_EDID_READ: 5575 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5576 response = intel_dp_autotest_edid(intel_dp); 5577 break; 5578 case DP_TEST_LINK_PHY_TEST_PATTERN: 5579 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5580 response = intel_dp_autotest_phy_pattern(intel_dp); 5581 break; 5582 default: 5583 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5584 request); 5585 break; 5586 } 5587 5588 if (response & DP_TEST_ACK) 5589 intel_dp->compliance.test_type = request; 5590 5591 update_status: 5592 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5593 if (status <= 0) 5594 drm_dbg_kms(&i915->drm, 5595 "Could not write test response to sink\n"); 5596 } 5597 5598 static int 5599 intel_dp_check_mst_status(struct intel_dp *intel_dp) 5600 { 5601 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5602 bool need_retrain = false; 5603 5604 if (!intel_dp->is_mst) 5605 return -EINVAL; 5606 5607 WARN_ON_ONCE(intel_dp->active_mst_links < 0); 5608 5609 for (;;) { 5610 u8 esi[DP_DPRX_ESI_LEN] = {}; 5611 bool bret, handled; 5612 int retry; 5613 5614 bret = intel_dp_get_sink_irq_esi(intel_dp, esi); 5615 if (!bret) { 5616 drm_dbg_kms(&i915->drm, 5617 "failed to get ESI - device may have failed\n"); 5618 return -EINVAL; 5619 } 5620 5621 /* check link status - esi[10] = 0x200c */ 5622 if (intel_dp->active_mst_links > 0 && !need_retrain && 5623 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 5624 drm_dbg_kms(&i915->drm, 5625 "channel EQ not ok, retraining\n"); 5626 need_retrain = true; 5627 } 5628 5629 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 5630 5631 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 5632 if (!handled) 5633 break; 5634 5635 for (retry = 0; retry < 3; retry++) { 5636 int wret; 5637 5638 wret = drm_dp_dpcd_write(&intel_dp->aux, 5639 DP_SINK_COUNT_ESI+1, 5640 &esi[1], 3); 5641 if (wret == 3) 5642 break; 5643 } 5644 } 5645 5646 return need_retrain; 5647 } 5648 5649 static bool 5650 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5651 { 5652 u8 link_status[DP_LINK_STATUS_SIZE]; 5653 5654 if (!intel_dp->link_trained) 5655 return false; 5656 5657 /* 5658 * While PSR source HW is enabled, it will control main-link sending 5659 * frames, enabling and disabling it so trying to do a retrain will fail 5660 * as the link would or not be on or it could mix training patterns 5661 * and frame data at the same time causing retrain to fail. 5662 * Also when exiting PSR, HW will retrain the link anyways fixing 5663 * any link status error. 5664 */ 5665 if (intel_psr_enabled(intel_dp)) 5666 return false; 5667 5668 if (!intel_dp_get_link_status(intel_dp, link_status)) 5669 return false; 5670 5671 /* 5672 * Validate the cached values of intel_dp->link_rate and 5673 * intel_dp->lane_count before attempting to retrain. 5674 */ 5675 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5676 intel_dp->lane_count)) 5677 return false; 5678 5679 /* Retrain if Channel EQ or CR not ok */ 5680 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5681 } 5682 5683 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5684 const struct drm_connector_state *conn_state) 5685 { 5686 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5687 struct intel_encoder *encoder; 5688 enum pipe pipe; 5689 5690 if (!conn_state->best_encoder) 5691 return false; 5692 5693 /* SST */ 5694 encoder = &dp_to_dig_port(intel_dp)->base; 5695 if (conn_state->best_encoder == &encoder->base) 5696 return true; 5697 5698 /* MST */ 5699 for_each_pipe(i915, pipe) { 5700 encoder = &intel_dp->mst_encoders[pipe]->base; 5701 if (conn_state->best_encoder == &encoder->base) 5702 return true; 5703 } 5704 5705 return false; 5706 } 5707 5708 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5709 struct drm_modeset_acquire_ctx *ctx, 5710 u32 *crtc_mask) 5711 { 5712 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5713 struct drm_connector_list_iter conn_iter; 5714 struct intel_connector *connector; 5715 int ret = 0; 5716 5717 *crtc_mask = 0; 5718 5719 if (!intel_dp_needs_link_retrain(intel_dp)) 5720 return 0; 5721 5722 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5723 for_each_intel_connector_iter(connector, &conn_iter) { 5724 struct drm_connector_state *conn_state = 5725 connector->base.state; 5726 struct intel_crtc_state *crtc_state; 5727 struct intel_crtc *crtc; 5728 5729 if (!intel_dp_has_connector(intel_dp, conn_state)) 5730 continue; 5731 5732 crtc = to_intel_crtc(conn_state->crtc); 5733 if (!crtc) 5734 continue; 5735 5736 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5737 if (ret) 5738 break; 5739 5740 crtc_state = to_intel_crtc_state(crtc->base.state); 5741 5742 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5743 5744 if (!crtc_state->hw.active) 5745 continue; 5746 5747 if (conn_state->commit && 5748 !try_wait_for_completion(&conn_state->commit->hw_done)) 5749 continue; 5750 5751 *crtc_mask |= drm_crtc_mask(&crtc->base); 5752 } 5753 drm_connector_list_iter_end(&conn_iter); 5754 5755 if (!intel_dp_needs_link_retrain(intel_dp)) 5756 *crtc_mask = 0; 5757 5758 return ret; 5759 } 5760 5761 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5762 { 5763 struct intel_connector *connector = intel_dp->attached_connector; 5764 5765 return connector->base.status == connector_status_connected || 5766 intel_dp->is_mst; 5767 } 5768 5769 int intel_dp_retrain_link(struct intel_encoder *encoder, 5770 struct drm_modeset_acquire_ctx *ctx) 5771 { 5772 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5773 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5774 struct intel_crtc *crtc; 5775 u32 crtc_mask; 5776 int ret; 5777 5778 if (!intel_dp_is_connected(intel_dp)) 5779 return 0; 5780 5781 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5782 ctx); 5783 if (ret) 5784 return ret; 5785 5786 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5787 if (ret) 5788 return ret; 5789 5790 if (crtc_mask == 0) 5791 return 0; 5792 5793 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5794 encoder->base.base.id, encoder->base.name); 5795 5796 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5797 const struct intel_crtc_state *crtc_state = 5798 to_intel_crtc_state(crtc->base.state); 5799 5800 /* Suppress underruns caused by re-training */ 5801 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5802 if (crtc_state->has_pch_encoder) 5803 intel_set_pch_fifo_underrun_reporting(dev_priv, 5804 intel_crtc_pch_transcoder(crtc), false); 5805 } 5806 5807 intel_dp_start_link_train(intel_dp); 5808 intel_dp_stop_link_train(intel_dp); 5809 5810 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5811 const struct intel_crtc_state *crtc_state = 5812 to_intel_crtc_state(crtc->base.state); 5813 5814 /* Keep underrun reporting disabled until things are stable */ 5815 intel_wait_for_vblank(dev_priv, crtc->pipe); 5816 5817 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5818 if (crtc_state->has_pch_encoder) 5819 intel_set_pch_fifo_underrun_reporting(dev_priv, 5820 intel_crtc_pch_transcoder(crtc), true); 5821 } 5822 5823 return 0; 5824 } 5825 5826 /* 5827 * If display is now connected check links status, 5828 * there has been known issues of link loss triggering 5829 * long pulse. 5830 * 5831 * Some sinks (eg. ASUS PB287Q) seem to perform some 5832 * weird HPD ping pong during modesets. So we can apparently 5833 * end up with HPD going low during a modeset, and then 5834 * going back up soon after. And once that happens we must 5835 * retrain the link to get a picture. That's in case no 5836 * userspace component reacted to intermittent HPD dip. 5837 */ 5838 static enum intel_hotplug_state 5839 intel_dp_hotplug(struct intel_encoder *encoder, 5840 struct intel_connector *connector) 5841 { 5842 struct drm_modeset_acquire_ctx ctx; 5843 enum intel_hotplug_state state; 5844 int ret; 5845 5846 state = intel_encoder_hotplug(encoder, connector); 5847 5848 drm_modeset_acquire_init(&ctx, 0); 5849 5850 for (;;) { 5851 ret = intel_dp_retrain_link(encoder, &ctx); 5852 5853 if (ret == -EDEADLK) { 5854 drm_modeset_backoff(&ctx); 5855 continue; 5856 } 5857 5858 break; 5859 } 5860 5861 drm_modeset_drop_locks(&ctx); 5862 drm_modeset_acquire_fini(&ctx); 5863 drm_WARN(encoder->base.dev, ret, 5864 "Acquiring modeset locks failed with %i\n", ret); 5865 5866 /* 5867 * Keeping it consistent with intel_ddi_hotplug() and 5868 * intel_hdmi_hotplug(). 5869 */ 5870 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) 5871 state = INTEL_HOTPLUG_RETRY; 5872 5873 return state; 5874 } 5875 5876 static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 5877 { 5878 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5879 u8 val; 5880 5881 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5882 return; 5883 5884 if (drm_dp_dpcd_readb(&intel_dp->aux, 5885 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5886 return; 5887 5888 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5889 5890 if (val & DP_AUTOMATED_TEST_REQUEST) 5891 intel_dp_handle_test_request(intel_dp); 5892 5893 if (val & DP_CP_IRQ) 5894 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5895 5896 if (val & DP_SINK_SPECIFIC_IRQ) 5897 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 5898 } 5899 5900 /* 5901 * According to DP spec 5902 * 5.1.2: 5903 * 1. Read DPCD 5904 * 2. Configure link according to Receiver Capabilities 5905 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5906 * 4. Check link status on receipt of hot-plug interrupt 5907 * 5908 * intel_dp_short_pulse - handles short pulse interrupts 5909 * when full detection is not required. 5910 * Returns %true if short pulse is handled and full detection 5911 * is NOT required and %false otherwise. 5912 */ 5913 static bool 5914 intel_dp_short_pulse(struct intel_dp *intel_dp) 5915 { 5916 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5917 u8 old_sink_count = intel_dp->sink_count; 5918 bool ret; 5919 5920 /* 5921 * Clearing compliance test variables to allow capturing 5922 * of values for next automated test request. 5923 */ 5924 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5925 5926 /* 5927 * Now read the DPCD to see if it's actually running 5928 * If the current value of sink count doesn't match with 5929 * the value that was stored earlier or dpcd read failed 5930 * we need to do full detection 5931 */ 5932 ret = intel_dp_get_dpcd(intel_dp); 5933 5934 if ((old_sink_count != intel_dp->sink_count) || !ret) { 5935 /* No need to proceed if we are going to do full detect */ 5936 return false; 5937 } 5938 5939 intel_dp_check_service_irq(intel_dp); 5940 5941 /* Handle CEC interrupts, if any */ 5942 drm_dp_cec_irq(&intel_dp->aux); 5943 5944 /* defer to the hotplug work for link retraining if needed */ 5945 if (intel_dp_needs_link_retrain(intel_dp)) 5946 return false; 5947 5948 intel_psr_short_pulse(intel_dp); 5949 5950 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 5951 drm_dbg_kms(&dev_priv->drm, 5952 "Link Training Compliance Test requested\n"); 5953 /* Send a Hotplug Uevent to userspace to start modeset */ 5954 drm_kms_helper_hotplug_event(&dev_priv->drm); 5955 } 5956 5957 return true; 5958 } 5959 5960 /* XXX this is probably wrong for multiple downstream ports */ 5961 static enum drm_connector_status 5962 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 5963 { 5964 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5965 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 5966 u8 *dpcd = intel_dp->dpcd; 5967 u8 type; 5968 5969 if (WARN_ON(intel_dp_is_edp(intel_dp))) 5970 return connector_status_connected; 5971 5972 if (lspcon->active) 5973 lspcon_resume(lspcon); 5974 5975 if (!intel_dp_get_dpcd(intel_dp)) 5976 return connector_status_disconnected; 5977 5978 /* if there's no downstream port, we're done */ 5979 if (!drm_dp_is_branch(dpcd)) 5980 return connector_status_connected; 5981 5982 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 5983 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 5984 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 5985 5986 return intel_dp->sink_count ? 5987 connector_status_connected : connector_status_disconnected; 5988 } 5989 5990 if (intel_dp_can_mst(intel_dp)) 5991 return connector_status_connected; 5992 5993 /* If no HPD, poke DDC gently */ 5994 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5995 return connector_status_connected; 5996 5997 /* Well we tried, say unknown for unreliable port types */ 5998 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5999 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 6000 if (type == DP_DS_PORT_TYPE_VGA || 6001 type == DP_DS_PORT_TYPE_NON_EDID) 6002 return connector_status_unknown; 6003 } else { 6004 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 6005 DP_DWN_STRM_PORT_TYPE_MASK; 6006 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 6007 type == DP_DWN_STRM_PORT_TYPE_OTHER) 6008 return connector_status_unknown; 6009 } 6010 6011 /* Anything else is out of spec, warn and ignore */ 6012 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 6013 return connector_status_disconnected; 6014 } 6015 6016 static enum drm_connector_status 6017 edp_detect(struct intel_dp *intel_dp) 6018 { 6019 return connector_status_connected; 6020 } 6021 6022 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 6023 { 6024 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6025 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; 6026 6027 return intel_de_read(dev_priv, SDEISR) & bit; 6028 } 6029 6030 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 6031 { 6032 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6033 u32 bit; 6034 6035 switch (encoder->hpd_pin) { 6036 case HPD_PORT_B: 6037 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 6038 break; 6039 case HPD_PORT_C: 6040 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 6041 break; 6042 case HPD_PORT_D: 6043 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 6044 break; 6045 default: 6046 MISSING_CASE(encoder->hpd_pin); 6047 return false; 6048 } 6049 6050 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6051 } 6052 6053 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 6054 { 6055 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6056 u32 bit; 6057 6058 switch (encoder->hpd_pin) { 6059 case HPD_PORT_B: 6060 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 6061 break; 6062 case HPD_PORT_C: 6063 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 6064 break; 6065 case HPD_PORT_D: 6066 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 6067 break; 6068 default: 6069 MISSING_CASE(encoder->hpd_pin); 6070 return false; 6071 } 6072 6073 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6074 } 6075 6076 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 6077 { 6078 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6079 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; 6080 6081 return intel_de_read(dev_priv, DEISR) & bit; 6082 } 6083 6084 /* 6085 * intel_digital_port_connected - is the specified port connected? 6086 * @encoder: intel_encoder 6087 * 6088 * In cases where there's a connector physically connected but it can't be used 6089 * by our hardware we also return false, since the rest of the driver should 6090 * pretty much treat the port as disconnected. This is relevant for type-C 6091 * (starting on ICL) where there's ownership involved. 6092 * 6093 * Return %true if port is connected, %false otherwise. 6094 */ 6095 bool intel_digital_port_connected(struct intel_encoder *encoder) 6096 { 6097 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6098 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6099 bool is_connected = false; 6100 intel_wakeref_t wakeref; 6101 6102 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6103 is_connected = dig_port->connected(encoder); 6104 6105 return is_connected; 6106 } 6107 6108 static struct edid * 6109 intel_dp_get_edid(struct intel_dp *intel_dp) 6110 { 6111 struct intel_connector *intel_connector = intel_dp->attached_connector; 6112 6113 /* use cached edid if we have one */ 6114 if (intel_connector->edid) { 6115 /* invalid edid */ 6116 if (IS_ERR(intel_connector->edid)) 6117 return NULL; 6118 6119 return drm_edid_duplicate(intel_connector->edid); 6120 } else 6121 return drm_get_edid(&intel_connector->base, 6122 &intel_dp->aux.ddc); 6123 } 6124 6125 static void 6126 intel_dp_set_edid(struct intel_dp *intel_dp) 6127 { 6128 struct intel_connector *intel_connector = intel_dp->attached_connector; 6129 struct edid *edid; 6130 6131 intel_dp_unset_edid(intel_dp); 6132 edid = intel_dp_get_edid(intel_dp); 6133 intel_connector->detect_edid = edid; 6134 6135 intel_dp->has_audio = drm_detect_monitor_audio(edid); 6136 drm_dp_cec_set_edid(&intel_dp->aux, edid); 6137 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 6138 } 6139 6140 static void 6141 intel_dp_unset_edid(struct intel_dp *intel_dp) 6142 { 6143 struct intel_connector *intel_connector = intel_dp->attached_connector; 6144 6145 drm_dp_cec_unset_edid(&intel_dp->aux); 6146 kfree(intel_connector->detect_edid); 6147 intel_connector->detect_edid = NULL; 6148 6149 intel_dp->has_audio = false; 6150 intel_dp->edid_quirks = 0; 6151 } 6152 6153 static int 6154 intel_dp_detect(struct drm_connector *connector, 6155 struct drm_modeset_acquire_ctx *ctx, 6156 bool force) 6157 { 6158 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6159 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6160 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6161 struct intel_encoder *encoder = &dig_port->base; 6162 enum drm_connector_status status; 6163 6164 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6165 connector->base.id, connector->name); 6166 drm_WARN_ON(&dev_priv->drm, 6167 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 6168 6169 /* Can't disconnect eDP */ 6170 if (intel_dp_is_edp(intel_dp)) 6171 status = edp_detect(intel_dp); 6172 else if (intel_digital_port_connected(encoder)) 6173 status = intel_dp_detect_dpcd(intel_dp); 6174 else 6175 status = connector_status_disconnected; 6176 6177 if (status == connector_status_disconnected) { 6178 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6179 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 6180 6181 if (intel_dp->is_mst) { 6182 drm_dbg_kms(&dev_priv->drm, 6183 "MST device may have disappeared %d vs %d\n", 6184 intel_dp->is_mst, 6185 intel_dp->mst_mgr.mst_state); 6186 intel_dp->is_mst = false; 6187 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6188 intel_dp->is_mst); 6189 } 6190 6191 goto out; 6192 } 6193 6194 if (intel_dp->reset_link_params) { 6195 /* Initial max link lane count */ 6196 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 6197 6198 /* Initial max link rate */ 6199 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 6200 6201 intel_dp->reset_link_params = false; 6202 } 6203 6204 intel_dp_print_rates(intel_dp); 6205 6206 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 6207 if (INTEL_GEN(dev_priv) >= 11) 6208 intel_dp_get_dsc_sink_cap(intel_dp); 6209 6210 intel_dp_configure_mst(intel_dp); 6211 6212 if (intel_dp->is_mst) { 6213 /* 6214 * If we are in MST mode then this connector 6215 * won't appear connected or have anything 6216 * with EDID on it 6217 */ 6218 status = connector_status_disconnected; 6219 goto out; 6220 } 6221 6222 /* 6223 * Some external monitors do not signal loss of link synchronization 6224 * with an IRQ_HPD, so force a link status check. 6225 */ 6226 if (!intel_dp_is_edp(intel_dp)) { 6227 int ret; 6228 6229 ret = intel_dp_retrain_link(encoder, ctx); 6230 if (ret) 6231 return ret; 6232 } 6233 6234 /* 6235 * Clearing NACK and defer counts to get their exact values 6236 * while reading EDID which are required by Compliance tests 6237 * 4.2.2.4 and 4.2.2.5 6238 */ 6239 intel_dp->aux.i2c_nack_count = 0; 6240 intel_dp->aux.i2c_defer_count = 0; 6241 6242 intel_dp_set_edid(intel_dp); 6243 if (intel_dp_is_edp(intel_dp) || 6244 to_intel_connector(connector)->detect_edid) 6245 status = connector_status_connected; 6246 6247 intel_dp_check_service_irq(intel_dp); 6248 6249 out: 6250 if (status != connector_status_connected && !intel_dp->is_mst) 6251 intel_dp_unset_edid(intel_dp); 6252 6253 /* 6254 * Make sure the refs for power wells enabled during detect are 6255 * dropped to avoid a new detect cycle triggered by HPD polling. 6256 */ 6257 intel_display_power_flush_work(dev_priv); 6258 6259 return status; 6260 } 6261 6262 static void 6263 intel_dp_force(struct drm_connector *connector) 6264 { 6265 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6266 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6267 struct intel_encoder *intel_encoder = &dig_port->base; 6268 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6269 enum intel_display_power_domain aux_domain = 6270 intel_aux_power_domain(dig_port); 6271 intel_wakeref_t wakeref; 6272 6273 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6274 connector->base.id, connector->name); 6275 intel_dp_unset_edid(intel_dp); 6276 6277 if (connector->status != connector_status_connected) 6278 return; 6279 6280 wakeref = intel_display_power_get(dev_priv, aux_domain); 6281 6282 intel_dp_set_edid(intel_dp); 6283 6284 intel_display_power_put(dev_priv, aux_domain, wakeref); 6285 } 6286 6287 static int intel_dp_get_modes(struct drm_connector *connector) 6288 { 6289 struct intel_connector *intel_connector = to_intel_connector(connector); 6290 struct edid *edid; 6291 6292 edid = intel_connector->detect_edid; 6293 if (edid) { 6294 int ret = intel_connector_update_modes(connector, edid); 6295 if (ret) 6296 return ret; 6297 } 6298 6299 /* if eDP has no EDID, fall back to fixed mode */ 6300 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 6301 intel_connector->panel.fixed_mode) { 6302 struct drm_display_mode *mode; 6303 6304 mode = drm_mode_duplicate(connector->dev, 6305 intel_connector->panel.fixed_mode); 6306 if (mode) { 6307 drm_mode_probed_add(connector, mode); 6308 return 1; 6309 } 6310 } 6311 6312 return 0; 6313 } 6314 6315 static int 6316 intel_dp_connector_register(struct drm_connector *connector) 6317 { 6318 struct drm_i915_private *i915 = to_i915(connector->dev); 6319 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6320 int ret; 6321 6322 ret = intel_connector_register(connector); 6323 if (ret) 6324 return ret; 6325 6326 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 6327 intel_dp->aux.name, connector->kdev->kobj.name); 6328 6329 intel_dp->aux.dev = connector->kdev; 6330 ret = drm_dp_aux_register(&intel_dp->aux); 6331 if (!ret) 6332 drm_dp_cec_register_connector(&intel_dp->aux, connector); 6333 return ret; 6334 } 6335 6336 static void 6337 intel_dp_connector_unregister(struct drm_connector *connector) 6338 { 6339 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6340 6341 drm_dp_cec_unregister_connector(&intel_dp->aux); 6342 drm_dp_aux_unregister(&intel_dp->aux); 6343 intel_connector_unregister(connector); 6344 } 6345 6346 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 6347 { 6348 struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 6349 struct intel_dp *intel_dp = &intel_dig_port->dp; 6350 6351 intel_dp_mst_encoder_cleanup(intel_dig_port); 6352 if (intel_dp_is_edp(intel_dp)) { 6353 intel_wakeref_t wakeref; 6354 6355 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6356 /* 6357 * vdd might still be enabled do to the delayed vdd off. 6358 * Make sure vdd is actually turned off here. 6359 */ 6360 with_pps_lock(intel_dp, wakeref) 6361 edp_panel_vdd_off_sync(intel_dp); 6362 6363 if (intel_dp->edp_notifier.notifier_call) { 6364 unregister_reboot_notifier(&intel_dp->edp_notifier); 6365 intel_dp->edp_notifier.notifier_call = NULL; 6366 } 6367 } 6368 6369 intel_dp_aux_fini(intel_dp); 6370 } 6371 6372 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 6373 { 6374 intel_dp_encoder_flush_work(encoder); 6375 6376 drm_encoder_cleanup(encoder); 6377 kfree(enc_to_dig_port(to_intel_encoder(encoder))); 6378 } 6379 6380 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 6381 { 6382 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 6383 intel_wakeref_t wakeref; 6384 6385 if (!intel_dp_is_edp(intel_dp)) 6386 return; 6387 6388 /* 6389 * vdd might still be enabled do to the delayed vdd off. 6390 * Make sure vdd is actually turned off here. 6391 */ 6392 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6393 with_pps_lock(intel_dp, wakeref) 6394 edp_panel_vdd_off_sync(intel_dp); 6395 } 6396 6397 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) 6398 { 6399 long ret; 6400 6401 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) 6402 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, 6403 msecs_to_jiffies(timeout)); 6404 6405 if (!ret) 6406 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); 6407 } 6408 6409 static 6410 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, 6411 u8 *an) 6412 { 6413 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6414 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); 6415 static const struct drm_dp_aux_msg msg = { 6416 .request = DP_AUX_NATIVE_WRITE, 6417 .address = DP_AUX_HDCP_AKSV, 6418 .size = DRM_HDCP_KSV_LEN, 6419 }; 6420 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; 6421 ssize_t dpcd_ret; 6422 int ret; 6423 6424 /* Output An first, that's easy */ 6425 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, 6426 an, DRM_HDCP_AN_LEN); 6427 if (dpcd_ret != DRM_HDCP_AN_LEN) { 6428 drm_dbg_kms(&i915->drm, 6429 "Failed to write An over DP/AUX (%zd)\n", 6430 dpcd_ret); 6431 return dpcd_ret >= 0 ? -EIO : dpcd_ret; 6432 } 6433 6434 /* 6435 * Since Aksv is Oh-So-Secret, we can't access it in software. So in 6436 * order to get it on the wire, we need to create the AUX header as if 6437 * we were writing the data, and then tickle the hardware to output the 6438 * data once the header is sent out. 6439 */ 6440 intel_dp_aux_header(txbuf, &msg); 6441 6442 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 6443 rxbuf, sizeof(rxbuf), 6444 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 6445 if (ret < 0) { 6446 drm_dbg_kms(&i915->drm, 6447 "Write Aksv over DP/AUX failed (%d)\n", ret); 6448 return ret; 6449 } else if (ret == 0) { 6450 drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n"); 6451 return -EIO; 6452 } 6453 6454 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 6455 if (reply != DP_AUX_NATIVE_REPLY_ACK) { 6456 drm_dbg_kms(&i915->drm, 6457 "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", 6458 reply); 6459 return -EIO; 6460 } 6461 return 0; 6462 } 6463 6464 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, 6465 u8 *bksv) 6466 { 6467 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6468 ssize_t ret; 6469 6470 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 6471 DRM_HDCP_KSV_LEN); 6472 if (ret != DRM_HDCP_KSV_LEN) { 6473 drm_dbg_kms(&i915->drm, 6474 "Read Bksv from DP/AUX failed (%zd)\n", ret); 6475 return ret >= 0 ? -EIO : ret; 6476 } 6477 return 0; 6478 } 6479 6480 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, 6481 u8 *bstatus) 6482 { 6483 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6484 ssize_t ret; 6485 6486 /* 6487 * For some reason the HDMI and DP HDCP specs call this register 6488 * definition by different names. In the HDMI spec, it's called BSTATUS, 6489 * but in DP it's called BINFO. 6490 */ 6491 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, 6492 bstatus, DRM_HDCP_BSTATUS_LEN); 6493 if (ret != DRM_HDCP_BSTATUS_LEN) { 6494 drm_dbg_kms(&i915->drm, 6495 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6496 return ret >= 0 ? -EIO : ret; 6497 } 6498 return 0; 6499 } 6500 6501 static 6502 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, 6503 u8 *bcaps) 6504 { 6505 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6506 ssize_t ret; 6507 6508 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6509 bcaps, 1); 6510 if (ret != 1) { 6511 drm_dbg_kms(&i915->drm, 6512 "Read bcaps from DP/AUX failed (%zd)\n", ret); 6513 return ret >= 0 ? -EIO : ret; 6514 } 6515 6516 return 0; 6517 } 6518 6519 static 6520 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, 6521 bool *repeater_present) 6522 { 6523 ssize_t ret; 6524 u8 bcaps; 6525 6526 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6527 if (ret) 6528 return ret; 6529 6530 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6531 return 0; 6532 } 6533 6534 static 6535 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, 6536 u8 *ri_prime) 6537 { 6538 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6539 ssize_t ret; 6540 6541 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6542 ri_prime, DRM_HDCP_RI_LEN); 6543 if (ret != DRM_HDCP_RI_LEN) { 6544 drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", 6545 ret); 6546 return ret >= 0 ? -EIO : ret; 6547 } 6548 return 0; 6549 } 6550 6551 static 6552 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, 6553 bool *ksv_ready) 6554 { 6555 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6556 ssize_t ret; 6557 u8 bstatus; 6558 6559 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6560 &bstatus, 1); 6561 if (ret != 1) { 6562 drm_dbg_kms(&i915->drm, 6563 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6564 return ret >= 0 ? -EIO : ret; 6565 } 6566 *ksv_ready = bstatus & DP_BSTATUS_READY; 6567 return 0; 6568 } 6569 6570 static 6571 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, 6572 int num_downstream, u8 *ksv_fifo) 6573 { 6574 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6575 ssize_t ret; 6576 int i; 6577 6578 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6579 for (i = 0; i < num_downstream; i += 3) { 6580 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6581 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6582 DP_AUX_HDCP_KSV_FIFO, 6583 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6584 len); 6585 if (ret != len) { 6586 drm_dbg_kms(&i915->drm, 6587 "Read ksv[%d] from DP/AUX failed (%zd)\n", 6588 i, ret); 6589 return ret >= 0 ? -EIO : ret; 6590 } 6591 } 6592 return 0; 6593 } 6594 6595 static 6596 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, 6597 int i, u32 *part) 6598 { 6599 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6600 ssize_t ret; 6601 6602 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6603 return -EINVAL; 6604 6605 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6606 DP_AUX_HDCP_V_PRIME(i), part, 6607 DRM_HDCP_V_PRIME_PART_LEN); 6608 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6609 drm_dbg_kms(&i915->drm, 6610 "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6611 return ret >= 0 ? -EIO : ret; 6612 } 6613 return 0; 6614 } 6615 6616 static 6617 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, 6618 bool enable) 6619 { 6620 /* Not used for single stream DisplayPort setups */ 6621 return 0; 6622 } 6623 6624 static 6625 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) 6626 { 6627 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6628 ssize_t ret; 6629 u8 bstatus; 6630 6631 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6632 &bstatus, 1); 6633 if (ret != 1) { 6634 drm_dbg_kms(&i915->drm, 6635 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6636 return false; 6637 } 6638 6639 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6640 } 6641 6642 static 6643 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, 6644 bool *hdcp_capable) 6645 { 6646 ssize_t ret; 6647 u8 bcaps; 6648 6649 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); 6650 if (ret) 6651 return ret; 6652 6653 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6654 return 0; 6655 } 6656 6657 struct hdcp2_dp_errata_stream_type { 6658 u8 msg_id; 6659 u8 stream_type; 6660 } __packed; 6661 6662 struct hdcp2_dp_msg_data { 6663 u8 msg_id; 6664 u32 offset; 6665 bool msg_detectable; 6666 u32 timeout; 6667 u32 timeout2; /* Added for non_paired situation */ 6668 }; 6669 6670 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6671 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6672 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6673 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6674 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6675 false, 0, 0 }, 6676 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6677 false, 0, 0 }, 6678 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6679 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6680 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6681 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6682 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6683 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6684 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6685 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6686 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6687 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6688 0, 0 }, 6689 { HDCP_2_2_REP_SEND_RECVID_LIST, 6690 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6691 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6692 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6693 0, 0 }, 6694 { HDCP_2_2_REP_STREAM_MANAGE, 6695 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6696 0, 0 }, 6697 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6698 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6699 /* local define to shovel this through the write_2_2 interface */ 6700 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6701 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6702 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6703 0, 0 }, 6704 }; 6705 6706 static int 6707 intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, 6708 u8 *rx_status) 6709 { 6710 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6711 ssize_t ret; 6712 6713 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6714 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6715 HDCP_2_2_DP_RXSTATUS_LEN); 6716 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6717 drm_dbg_kms(&i915->drm, 6718 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6719 return ret >= 0 ? -EIO : ret; 6720 } 6721 6722 return 0; 6723 } 6724 6725 static 6726 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, 6727 u8 msg_id, bool *msg_ready) 6728 { 6729 u8 rx_status; 6730 int ret; 6731 6732 *msg_ready = false; 6733 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6734 if (ret < 0) 6735 return ret; 6736 6737 switch (msg_id) { 6738 case HDCP_2_2_AKE_SEND_HPRIME: 6739 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) 6740 *msg_ready = true; 6741 break; 6742 case HDCP_2_2_AKE_SEND_PAIRING_INFO: 6743 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) 6744 *msg_ready = true; 6745 break; 6746 case HDCP_2_2_REP_SEND_RECVID_LIST: 6747 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6748 *msg_ready = true; 6749 break; 6750 default: 6751 DRM_ERROR("Unidentified msg_id: %d\n", msg_id); 6752 return -EINVAL; 6753 } 6754 6755 return 0; 6756 } 6757 6758 static ssize_t 6759 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, 6760 const struct hdcp2_dp_msg_data *hdcp2_msg_data) 6761 { 6762 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6763 struct intel_dp *dp = &intel_dig_port->dp; 6764 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6765 u8 msg_id = hdcp2_msg_data->msg_id; 6766 int ret, timeout; 6767 bool msg_ready = false; 6768 6769 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) 6770 timeout = hdcp2_msg_data->timeout2; 6771 else 6772 timeout = hdcp2_msg_data->timeout; 6773 6774 /* 6775 * There is no way to detect the CERT, LPRIME and STREAM_READY 6776 * availability. So Wait for timeout and read the msg. 6777 */ 6778 if (!hdcp2_msg_data->msg_detectable) { 6779 mdelay(timeout); 6780 ret = 0; 6781 } else { 6782 /* 6783 * As we want to check the msg availability at timeout, Ignoring 6784 * the timeout at wait for CP_IRQ. 6785 */ 6786 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); 6787 ret = hdcp2_detect_msg_availability(intel_dig_port, 6788 msg_id, &msg_ready); 6789 if (!msg_ready) 6790 ret = -ETIMEDOUT; 6791 } 6792 6793 if (ret) 6794 drm_dbg_kms(&i915->drm, 6795 "msg_id %d, ret %d, timeout(mSec): %d\n", 6796 hdcp2_msg_data->msg_id, ret, timeout); 6797 6798 return ret; 6799 } 6800 6801 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) 6802 { 6803 int i; 6804 6805 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) 6806 if (hdcp2_dp_msg_data[i].msg_id == msg_id) 6807 return &hdcp2_dp_msg_data[i]; 6808 6809 return NULL; 6810 } 6811 6812 static 6813 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, 6814 void *buf, size_t size) 6815 { 6816 struct intel_dp *dp = &intel_dig_port->dp; 6817 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6818 unsigned int offset; 6819 u8 *byte = buf; 6820 ssize_t ret, bytes_to_write, len; 6821 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6822 6823 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); 6824 if (!hdcp2_msg_data) 6825 return -EINVAL; 6826 6827 offset = hdcp2_msg_data->offset; 6828 6829 /* No msg_id in DP HDCP2.2 msgs */ 6830 bytes_to_write = size - 1; 6831 byte++; 6832 6833 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); 6834 6835 while (bytes_to_write) { 6836 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? 6837 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; 6838 6839 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, 6840 offset, (void *)byte, len); 6841 if (ret < 0) 6842 return ret; 6843 6844 bytes_to_write -= ret; 6845 byte += ret; 6846 offset += ret; 6847 } 6848 6849 return size; 6850 } 6851 6852 static 6853 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) 6854 { 6855 u8 rx_info[HDCP_2_2_RXINFO_LEN]; 6856 u32 dev_cnt; 6857 ssize_t ret; 6858 6859 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6860 DP_HDCP_2_2_REG_RXINFO_OFFSET, 6861 (void *)rx_info, HDCP_2_2_RXINFO_LEN); 6862 if (ret != HDCP_2_2_RXINFO_LEN) 6863 return ret >= 0 ? -EIO : ret; 6864 6865 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 6866 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 6867 6868 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) 6869 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; 6870 6871 ret = sizeof(struct hdcp2_rep_send_receiverid_list) - 6872 HDCP_2_2_RECEIVER_IDS_MAX_LEN + 6873 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); 6874 6875 return ret; 6876 } 6877 6878 static 6879 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, 6880 u8 msg_id, void *buf, size_t size) 6881 { 6882 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 6883 unsigned int offset; 6884 u8 *byte = buf; 6885 ssize_t ret, bytes_to_recv, len; 6886 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6887 6888 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); 6889 if (!hdcp2_msg_data) 6890 return -EINVAL; 6891 offset = hdcp2_msg_data->offset; 6892 6893 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); 6894 if (ret < 0) 6895 return ret; 6896 6897 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { 6898 ret = get_receiver_id_list_size(intel_dig_port); 6899 if (ret < 0) 6900 return ret; 6901 6902 size = ret; 6903 } 6904 bytes_to_recv = size - 1; 6905 6906 /* DP adaptation msgs has no msg_id */ 6907 byte++; 6908 6909 while (bytes_to_recv) { 6910 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 6911 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; 6912 6913 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, 6914 (void *)byte, len); 6915 if (ret < 0) { 6916 drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", 6917 msg_id, ret); 6918 return ret; 6919 } 6920 6921 bytes_to_recv -= ret; 6922 byte += ret; 6923 offset += ret; 6924 } 6925 byte = buf; 6926 *byte = msg_id; 6927 6928 return size; 6929 } 6930 6931 static 6932 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, 6933 bool is_repeater, u8 content_type) 6934 { 6935 int ret; 6936 struct hdcp2_dp_errata_stream_type stream_type_msg; 6937 6938 if (is_repeater) 6939 return 0; 6940 6941 /* 6942 * Errata for DP: As Stream type is used for encryption, Receiver 6943 * should be communicated with stream type for the decryption of the 6944 * content. 6945 * Repeater will be communicated with stream type as a part of it's 6946 * auth later in time. 6947 */ 6948 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; 6949 stream_type_msg.stream_type = content_type; 6950 6951 ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, 6952 sizeof(stream_type_msg)); 6953 6954 return ret < 0 ? ret : 0; 6955 6956 } 6957 6958 static 6959 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) 6960 { 6961 u8 rx_status; 6962 int ret; 6963 6964 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); 6965 if (ret) 6966 return ret; 6967 6968 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) 6969 ret = HDCP_REAUTH_REQUEST; 6970 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) 6971 ret = HDCP_LINK_INTEGRITY_FAILURE; 6972 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6973 ret = HDCP_TOPOLOGY_CHANGE; 6974 6975 return ret; 6976 } 6977 6978 static 6979 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, 6980 bool *capable) 6981 { 6982 u8 rx_caps[3]; 6983 int ret; 6984 6985 *capable = false; 6986 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, 6987 DP_HDCP_2_2_REG_RX_CAPS_OFFSET, 6988 rx_caps, HDCP_2_2_RXCAPS_LEN); 6989 if (ret != HDCP_2_2_RXCAPS_LEN) 6990 return ret >= 0 ? -EIO : ret; 6991 6992 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6993 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6994 *capable = true; 6995 6996 return 0; 6997 } 6998 6999 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 7000 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 7001 .read_bksv = intel_dp_hdcp_read_bksv, 7002 .read_bstatus = intel_dp_hdcp_read_bstatus, 7003 .repeater_present = intel_dp_hdcp_repeater_present, 7004 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 7005 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 7006 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 7007 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 7008 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 7009 .check_link = intel_dp_hdcp_check_link, 7010 .hdcp_capable = intel_dp_hdcp_capable, 7011 .write_2_2_msg = intel_dp_hdcp2_write_msg, 7012 .read_2_2_msg = intel_dp_hdcp2_read_msg, 7013 .config_stream_type = intel_dp_hdcp2_config_stream_type, 7014 .check_2_2_link = intel_dp_hdcp2_check_link, 7015 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 7016 .protocol = HDCP_PROTOCOL_DP, 7017 }; 7018 7019 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 7020 { 7021 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7022 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7023 7024 lockdep_assert_held(&dev_priv->pps_mutex); 7025 7026 if (!edp_have_panel_vdd(intel_dp)) 7027 return; 7028 7029 /* 7030 * The VDD bit needs a power domain reference, so if the bit is 7031 * already enabled when we boot or resume, grab this reference and 7032 * schedule a vdd off, so we don't hold on to the reference 7033 * indefinitely. 7034 */ 7035 drm_dbg_kms(&dev_priv->drm, 7036 "VDD left on by BIOS, adjusting state tracking\n"); 7037 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 7038 7039 edp_panel_vdd_schedule_off(intel_dp); 7040 } 7041 7042 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 7043 { 7044 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7045 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 7046 enum pipe pipe; 7047 7048 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 7049 encoder->port, &pipe)) 7050 return pipe; 7051 7052 return INVALID_PIPE; 7053 } 7054 7055 void intel_dp_encoder_reset(struct drm_encoder *encoder) 7056 { 7057 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 7058 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 7059 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 7060 intel_wakeref_t wakeref; 7061 7062 if (!HAS_DDI(dev_priv)) 7063 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 7064 7065 if (lspcon->active) 7066 lspcon_resume(lspcon); 7067 7068 intel_dp->reset_link_params = true; 7069 7070 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 7071 !intel_dp_is_edp(intel_dp)) 7072 return; 7073 7074 with_pps_lock(intel_dp, wakeref) { 7075 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7076 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 7077 7078 if (intel_dp_is_edp(intel_dp)) { 7079 /* 7080 * Reinit the power sequencer, in case BIOS did 7081 * something nasty with it. 7082 */ 7083 intel_dp_pps_init(intel_dp); 7084 intel_edp_panel_vdd_sanitize(intel_dp); 7085 } 7086 } 7087 } 7088 7089 static int intel_modeset_tile_group(struct intel_atomic_state *state, 7090 int tile_group_id) 7091 { 7092 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7093 struct drm_connector_list_iter conn_iter; 7094 struct drm_connector *connector; 7095 int ret = 0; 7096 7097 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 7098 drm_for_each_connector_iter(connector, &conn_iter) { 7099 struct drm_connector_state *conn_state; 7100 struct intel_crtc_state *crtc_state; 7101 struct intel_crtc *crtc; 7102 7103 if (!connector->has_tile || 7104 connector->tile_group->id != tile_group_id) 7105 continue; 7106 7107 conn_state = drm_atomic_get_connector_state(&state->base, 7108 connector); 7109 if (IS_ERR(conn_state)) { 7110 ret = PTR_ERR(conn_state); 7111 break; 7112 } 7113 7114 crtc = to_intel_crtc(conn_state->crtc); 7115 7116 if (!crtc) 7117 continue; 7118 7119 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7120 crtc_state->uapi.mode_changed = true; 7121 7122 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7123 if (ret) 7124 break; 7125 } 7126 drm_connector_list_iter_end(&conn_iter); 7127 7128 return ret; 7129 } 7130 7131 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 7132 { 7133 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7134 struct intel_crtc *crtc; 7135 7136 if (transcoders == 0) 7137 return 0; 7138 7139 for_each_intel_crtc(&dev_priv->drm, crtc) { 7140 struct intel_crtc_state *crtc_state; 7141 int ret; 7142 7143 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7144 if (IS_ERR(crtc_state)) 7145 return PTR_ERR(crtc_state); 7146 7147 if (!crtc_state->hw.enable) 7148 continue; 7149 7150 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 7151 continue; 7152 7153 crtc_state->uapi.mode_changed = true; 7154 7155 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 7156 if (ret) 7157 return ret; 7158 7159 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7160 if (ret) 7161 return ret; 7162 7163 transcoders &= ~BIT(crtc_state->cpu_transcoder); 7164 } 7165 7166 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 7167 7168 return 0; 7169 } 7170 7171 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 7172 struct drm_connector *connector) 7173 { 7174 const struct drm_connector_state *old_conn_state = 7175 drm_atomic_get_old_connector_state(&state->base, connector); 7176 const struct intel_crtc_state *old_crtc_state; 7177 struct intel_crtc *crtc; 7178 u8 transcoders; 7179 7180 crtc = to_intel_crtc(old_conn_state->crtc); 7181 if (!crtc) 7182 return 0; 7183 7184 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 7185 7186 if (!old_crtc_state->hw.active) 7187 return 0; 7188 7189 transcoders = old_crtc_state->sync_mode_slaves_mask; 7190 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 7191 transcoders |= BIT(old_crtc_state->master_transcoder); 7192 7193 return intel_modeset_affected_transcoders(state, 7194 transcoders); 7195 } 7196 7197 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 7198 struct drm_atomic_state *_state) 7199 { 7200 struct drm_i915_private *dev_priv = to_i915(conn->dev); 7201 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7202 int ret; 7203 7204 ret = intel_digital_connector_atomic_check(conn, &state->base); 7205 if (ret) 7206 return ret; 7207 7208 /* 7209 * We don't enable port sync on BDW due to missing w/as and 7210 * due to not having adjusted the modeset sequence appropriately. 7211 */ 7212 if (INTEL_GEN(dev_priv) < 9) 7213 return 0; 7214 7215 if (!intel_connector_needs_modeset(state, conn)) 7216 return 0; 7217 7218 if (conn->has_tile) { 7219 ret = intel_modeset_tile_group(state, conn->tile_group->id); 7220 if (ret) 7221 return ret; 7222 } 7223 7224 return intel_modeset_synced_crtcs(state, conn); 7225 } 7226 7227 static const struct drm_connector_funcs intel_dp_connector_funcs = { 7228 .force = intel_dp_force, 7229 .fill_modes = drm_helper_probe_single_connector_modes, 7230 .atomic_get_property = intel_digital_connector_atomic_get_property, 7231 .atomic_set_property = intel_digital_connector_atomic_set_property, 7232 .late_register = intel_dp_connector_register, 7233 .early_unregister = intel_dp_connector_unregister, 7234 .destroy = intel_connector_destroy, 7235 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7236 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 7237 }; 7238 7239 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 7240 .detect_ctx = intel_dp_detect, 7241 .get_modes = intel_dp_get_modes, 7242 .mode_valid = intel_dp_mode_valid, 7243 .atomic_check = intel_dp_connector_atomic_check, 7244 }; 7245 7246 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 7247 .reset = intel_dp_encoder_reset, 7248 .destroy = intel_dp_encoder_destroy, 7249 }; 7250 7251 static bool intel_edp_have_power(struct intel_dp *intel_dp) 7252 { 7253 intel_wakeref_t wakeref; 7254 bool have_power = false; 7255 7256 with_pps_lock(intel_dp, wakeref) { 7257 have_power = edp_have_panel_power(intel_dp) && 7258 edp_have_panel_vdd(intel_dp); 7259 } 7260 7261 return have_power; 7262 } 7263 7264 enum irqreturn 7265 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 7266 { 7267 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 7268 struct intel_dp *intel_dp = &intel_dig_port->dp; 7269 7270 if (intel_dig_port->base.type == INTEL_OUTPUT_EDP && 7271 (long_hpd || !intel_edp_have_power(intel_dp))) { 7272 /* 7273 * vdd off can generate a long/short pulse on eDP which 7274 * would require vdd on to handle it, and thus we 7275 * would end up in an endless cycle of 7276 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 7277 */ 7278 drm_dbg_kms(&i915->drm, 7279 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 7280 long_hpd ? "long" : "short", 7281 intel_dig_port->base.base.base.id, 7282 intel_dig_port->base.base.name); 7283 return IRQ_HANDLED; 7284 } 7285 7286 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 7287 intel_dig_port->base.base.base.id, 7288 intel_dig_port->base.base.name, 7289 long_hpd ? "long" : "short"); 7290 7291 if (long_hpd) { 7292 intel_dp->reset_link_params = true; 7293 return IRQ_NONE; 7294 } 7295 7296 if (intel_dp->is_mst) { 7297 switch (intel_dp_check_mst_status(intel_dp)) { 7298 case -EINVAL: 7299 /* 7300 * If we were in MST mode, and device is not 7301 * there, get out of MST mode 7302 */ 7303 drm_dbg_kms(&i915->drm, 7304 "MST device may have disappeared %d vs %d\n", 7305 intel_dp->is_mst, 7306 intel_dp->mst_mgr.mst_state); 7307 intel_dp->is_mst = false; 7308 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 7309 intel_dp->is_mst); 7310 7311 return IRQ_NONE; 7312 case 1: 7313 return IRQ_NONE; 7314 default: 7315 break; 7316 } 7317 } 7318 7319 if (!intel_dp->is_mst) { 7320 bool handled; 7321 7322 handled = intel_dp_short_pulse(intel_dp); 7323 7324 if (!handled) 7325 return IRQ_NONE; 7326 } 7327 7328 return IRQ_HANDLED; 7329 } 7330 7331 /* check the VBT to see whether the eDP is on another port */ 7332 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 7333 { 7334 /* 7335 * eDP not supported on g4x. so bail out early just 7336 * for a bit extra safety in case the VBT is bonkers. 7337 */ 7338 if (INTEL_GEN(dev_priv) < 5) 7339 return false; 7340 7341 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 7342 return true; 7343 7344 return intel_bios_is_port_edp(dev_priv, port); 7345 } 7346 7347 static void 7348 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 7349 { 7350 struct drm_i915_private *dev_priv = to_i915(connector->dev); 7351 enum port port = dp_to_dig_port(intel_dp)->base.port; 7352 7353 if (!IS_G4X(dev_priv) && port != PORT_A) 7354 intel_attach_force_audio_property(connector); 7355 7356 intel_attach_broadcast_rgb_property(connector); 7357 if (HAS_GMCH(dev_priv)) 7358 drm_connector_attach_max_bpc_property(connector, 6, 10); 7359 else if (INTEL_GEN(dev_priv) >= 5) 7360 drm_connector_attach_max_bpc_property(connector, 6, 12); 7361 7362 intel_attach_colorspace_property(connector); 7363 7364 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 7365 drm_object_attach_property(&connector->base, 7366 connector->dev->mode_config.hdr_output_metadata_property, 7367 0); 7368 7369 if (intel_dp_is_edp(intel_dp)) { 7370 u32 allowed_scalers; 7371 7372 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 7373 if (!HAS_GMCH(dev_priv)) 7374 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 7375 7376 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 7377 7378 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 7379 7380 } 7381 } 7382 7383 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 7384 { 7385 intel_dp->panel_power_off_time = ktime_get_boottime(); 7386 intel_dp->last_power_on = jiffies; 7387 intel_dp->last_backlight_off = jiffies; 7388 } 7389 7390 static void 7391 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 7392 { 7393 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7394 u32 pp_on, pp_off, pp_ctl; 7395 struct pps_registers regs; 7396 7397 intel_pps_get_registers(intel_dp, ®s); 7398 7399 pp_ctl = ilk_get_pp_control(intel_dp); 7400 7401 /* Ensure PPS is unlocked */ 7402 if (!HAS_DDI(dev_priv)) 7403 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7404 7405 pp_on = intel_de_read(dev_priv, regs.pp_on); 7406 pp_off = intel_de_read(dev_priv, regs.pp_off); 7407 7408 /* Pull timing values out of registers */ 7409 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); 7410 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); 7411 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); 7412 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); 7413 7414 if (i915_mmio_reg_valid(regs.pp_div)) { 7415 u32 pp_div; 7416 7417 pp_div = intel_de_read(dev_priv, regs.pp_div); 7418 7419 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; 7420 } else { 7421 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; 7422 } 7423 } 7424 7425 static void 7426 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) 7427 { 7428 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 7429 state_name, 7430 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); 7431 } 7432 7433 static void 7434 intel_pps_verify_state(struct intel_dp *intel_dp) 7435 { 7436 struct edp_power_seq hw; 7437 struct edp_power_seq *sw = &intel_dp->pps_delays; 7438 7439 intel_pps_readout_hw_state(intel_dp, &hw); 7440 7441 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || 7442 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { 7443 DRM_ERROR("PPS state mismatch\n"); 7444 intel_pps_dump_state("sw", sw); 7445 intel_pps_dump_state("hw", &hw); 7446 } 7447 } 7448 7449 static void 7450 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) 7451 { 7452 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7453 struct edp_power_seq cur, vbt, spec, 7454 *final = &intel_dp->pps_delays; 7455 7456 lockdep_assert_held(&dev_priv->pps_mutex); 7457 7458 /* already initialized? */ 7459 if (final->t11_t12 != 0) 7460 return; 7461 7462 intel_pps_readout_hw_state(intel_dp, &cur); 7463 7464 intel_pps_dump_state("cur", &cur); 7465 7466 vbt = dev_priv->vbt.edp.pps; 7467 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay 7468 * of 500ms appears to be too short. Ocassionally the panel 7469 * just fails to power back on. Increasing the delay to 800ms 7470 * seems sufficient to avoid this problem. 7471 */ 7472 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 7473 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); 7474 drm_dbg_kms(&dev_priv->drm, 7475 "Increasing T12 panel delay as per the quirk to %d\n", 7476 vbt.t11_t12); 7477 } 7478 /* T11_T12 delay is special and actually in units of 100ms, but zero 7479 * based in the hw (so we need to add 100 ms). But the sw vbt 7480 * table multiplies it with 1000 to make it in units of 100usec, 7481 * too. */ 7482 vbt.t11_t12 += 100 * 10; 7483 7484 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of 7485 * our hw here, which are all in 100usec. */ 7486 spec.t1_t3 = 210 * 10; 7487 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ 7488 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ 7489 spec.t10 = 500 * 10; 7490 /* This one is special and actually in units of 100ms, but zero 7491 * based in the hw (so we need to add 100 ms). But the sw vbt 7492 * table multiplies it with 1000 to make it in units of 100usec, 7493 * too. */ 7494 spec.t11_t12 = (510 + 100) * 10; 7495 7496 intel_pps_dump_state("vbt", &vbt); 7497 7498 /* Use the max of the register settings and vbt. If both are 7499 * unset, fall back to the spec limits. */ 7500 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \ 7501 spec.field : \ 7502 max(cur.field, vbt.field)) 7503 assign_final(t1_t3); 7504 assign_final(t8); 7505 assign_final(t9); 7506 assign_final(t10); 7507 assign_final(t11_t12); 7508 #undef assign_final 7509 7510 #define get_delay(field) (DIV_ROUND_UP(final->field, 10)) 7511 intel_dp->panel_power_up_delay = get_delay(t1_t3); 7512 intel_dp->backlight_on_delay = get_delay(t8); 7513 intel_dp->backlight_off_delay = get_delay(t9); 7514 intel_dp->panel_power_down_delay = get_delay(t10); 7515 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 7516 #undef get_delay 7517 7518 drm_dbg_kms(&dev_priv->drm, 7519 "panel power up delay %d, power down delay %d, power cycle delay %d\n", 7520 intel_dp->panel_power_up_delay, 7521 intel_dp->panel_power_down_delay, 7522 intel_dp->panel_power_cycle_delay); 7523 7524 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", 7525 intel_dp->backlight_on_delay, 7526 intel_dp->backlight_off_delay); 7527 7528 /* 7529 * We override the HW backlight delays to 1 because we do manual waits 7530 * on them. For T8, even BSpec recommends doing it. For T9, if we 7531 * don't do this, we'll end up waiting for the backlight off delay 7532 * twice: once when we do the manual sleep, and once when we disable 7533 * the panel and wait for the PP_STATUS bit to become zero. 7534 */ 7535 final->t8 = 1; 7536 final->t9 = 1; 7537 7538 /* 7539 * HW has only a 100msec granularity for t11_t12 so round it up 7540 * accordingly. 7541 */ 7542 final->t11_t12 = roundup(final->t11_t12, 100 * 10); 7543 } 7544 7545 static void 7546 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, 7547 bool force_disable_vdd) 7548 { 7549 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7550 u32 pp_on, pp_off, port_sel = 0; 7551 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; 7552 struct pps_registers regs; 7553 enum port port = dp_to_dig_port(intel_dp)->base.port; 7554 const struct edp_power_seq *seq = &intel_dp->pps_delays; 7555 7556 lockdep_assert_held(&dev_priv->pps_mutex); 7557 7558 intel_pps_get_registers(intel_dp, ®s); 7559 7560 /* 7561 * On some VLV machines the BIOS can leave the VDD 7562 * enabled even on power sequencers which aren't 7563 * hooked up to any port. This would mess up the 7564 * power domain tracking the first time we pick 7565 * one of these power sequencers for use since 7566 * edp_panel_vdd_on() would notice that the VDD was 7567 * already on and therefore wouldn't grab the power 7568 * domain reference. Disable VDD first to avoid this. 7569 * This also avoids spuriously turning the VDD on as 7570 * soon as the new power sequencer gets initialized. 7571 */ 7572 if (force_disable_vdd) { 7573 u32 pp = ilk_get_pp_control(intel_dp); 7574 7575 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, 7576 "Panel power already on\n"); 7577 7578 if (pp & EDP_FORCE_VDD) 7579 drm_dbg_kms(&dev_priv->drm, 7580 "VDD already on, disabling first\n"); 7581 7582 pp &= ~EDP_FORCE_VDD; 7583 7584 intel_de_write(dev_priv, regs.pp_ctrl, pp); 7585 } 7586 7587 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | 7588 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); 7589 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | 7590 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); 7591 7592 /* Haswell doesn't have any port selection bits for the panel 7593 * power sequencer any more. */ 7594 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7595 port_sel = PANEL_PORT_SELECT_VLV(port); 7596 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 7597 switch (port) { 7598 case PORT_A: 7599 port_sel = PANEL_PORT_SELECT_DPA; 7600 break; 7601 case PORT_C: 7602 port_sel = PANEL_PORT_SELECT_DPC; 7603 break; 7604 case PORT_D: 7605 port_sel = PANEL_PORT_SELECT_DPD; 7606 break; 7607 default: 7608 MISSING_CASE(port); 7609 break; 7610 } 7611 } 7612 7613 pp_on |= port_sel; 7614 7615 intel_de_write(dev_priv, regs.pp_on, pp_on); 7616 intel_de_write(dev_priv, regs.pp_off, pp_off); 7617 7618 /* 7619 * Compute the divisor for the pp clock, simply match the Bspec formula. 7620 */ 7621 if (i915_mmio_reg_valid(regs.pp_div)) { 7622 intel_de_write(dev_priv, regs.pp_div, 7623 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 7624 } else { 7625 u32 pp_ctl; 7626 7627 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 7628 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 7629 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 7630 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 7631 } 7632 7633 drm_dbg_kms(&dev_priv->drm, 7634 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 7635 intel_de_read(dev_priv, regs.pp_on), 7636 intel_de_read(dev_priv, regs.pp_off), 7637 i915_mmio_reg_valid(regs.pp_div) ? 7638 intel_de_read(dev_priv, regs.pp_div) : 7639 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); 7640 } 7641 7642 static void intel_dp_pps_init(struct intel_dp *intel_dp) 7643 { 7644 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7645 7646 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7647 vlv_initial_power_sequencer_setup(intel_dp); 7648 } else { 7649 intel_dp_init_panel_power_sequencer(intel_dp); 7650 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 7651 } 7652 } 7653 7654 /** 7655 * intel_dp_set_drrs_state - program registers for RR switch to take effect 7656 * @dev_priv: i915 device 7657 * @crtc_state: a pointer to the active intel_crtc_state 7658 * @refresh_rate: RR to be programmed 7659 * 7660 * This function gets called when refresh rate (RR) has to be changed from 7661 * one frequency to another. Switches can be between high and low RR 7662 * supported by the panel or to any other RR based on media playback (in 7663 * this case, RR value needs to be passed from user space). 7664 * 7665 * The caller of this function needs to take a lock on dev_priv->drrs. 7666 */ 7667 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 7668 const struct intel_crtc_state *crtc_state, 7669 int refresh_rate) 7670 { 7671 struct intel_dp *intel_dp = dev_priv->drrs.dp; 7672 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 7673 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7674 7675 if (refresh_rate <= 0) { 7676 drm_dbg_kms(&dev_priv->drm, 7677 "Refresh rate should be positive non-zero.\n"); 7678 return; 7679 } 7680 7681 if (intel_dp == NULL) { 7682 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 7683 return; 7684 } 7685 7686 if (!intel_crtc) { 7687 drm_dbg_kms(&dev_priv->drm, 7688 "DRRS: intel_crtc not initialized\n"); 7689 return; 7690 } 7691 7692 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7693 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 7694 return; 7695 } 7696 7697 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == 7698 refresh_rate) 7699 index = DRRS_LOW_RR; 7700 7701 if (index == dev_priv->drrs.refresh_rate_type) { 7702 drm_dbg_kms(&dev_priv->drm, 7703 "DRRS requested for previously set RR...ignoring\n"); 7704 return; 7705 } 7706 7707 if (!crtc_state->hw.active) { 7708 drm_dbg_kms(&dev_priv->drm, 7709 "eDP encoder disabled. CRTC not Active\n"); 7710 return; 7711 } 7712 7713 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7714 switch (index) { 7715 case DRRS_HIGH_RR: 7716 intel_dp_set_m_n(crtc_state, M1_N1); 7717 break; 7718 case DRRS_LOW_RR: 7719 intel_dp_set_m_n(crtc_state, M2_N2); 7720 break; 7721 case DRRS_MAX_RR: 7722 default: 7723 drm_err(&dev_priv->drm, 7724 "Unsupported refreshrate type\n"); 7725 } 7726 } else if (INTEL_GEN(dev_priv) > 6) { 7727 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7728 u32 val; 7729 7730 val = intel_de_read(dev_priv, reg); 7731 if (index > DRRS_HIGH_RR) { 7732 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7733 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7734 else 7735 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7736 } else { 7737 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7738 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7739 else 7740 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7741 } 7742 intel_de_write(dev_priv, reg, val); 7743 } 7744 7745 dev_priv->drrs.refresh_rate_type = index; 7746 7747 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", 7748 refresh_rate); 7749 } 7750 7751 /** 7752 * intel_edp_drrs_enable - init drrs struct if supported 7753 * @intel_dp: DP struct 7754 * @crtc_state: A pointer to the active crtc state. 7755 * 7756 * Initializes frontbuffer_bits and drrs.dp 7757 */ 7758 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 7759 const struct intel_crtc_state *crtc_state) 7760 { 7761 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7762 7763 if (!crtc_state->has_drrs) { 7764 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); 7765 return; 7766 } 7767 7768 if (dev_priv->psr.enabled) { 7769 drm_dbg_kms(&dev_priv->drm, 7770 "PSR enabled. Not enabling DRRS.\n"); 7771 return; 7772 } 7773 7774 mutex_lock(&dev_priv->drrs.mutex); 7775 if (dev_priv->drrs.dp) { 7776 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); 7777 goto unlock; 7778 } 7779 7780 dev_priv->drrs.busy_frontbuffer_bits = 0; 7781 7782 dev_priv->drrs.dp = intel_dp; 7783 7784 unlock: 7785 mutex_unlock(&dev_priv->drrs.mutex); 7786 } 7787 7788 /** 7789 * intel_edp_drrs_disable - Disable DRRS 7790 * @intel_dp: DP struct 7791 * @old_crtc_state: Pointer to old crtc_state. 7792 * 7793 */ 7794 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 7795 const struct intel_crtc_state *old_crtc_state) 7796 { 7797 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7798 7799 if (!old_crtc_state->has_drrs) 7800 return; 7801 7802 mutex_lock(&dev_priv->drrs.mutex); 7803 if (!dev_priv->drrs.dp) { 7804 mutex_unlock(&dev_priv->drrs.mutex); 7805 return; 7806 } 7807 7808 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7809 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 7810 intel_dp->attached_connector->panel.fixed_mode->vrefresh); 7811 7812 dev_priv->drrs.dp = NULL; 7813 mutex_unlock(&dev_priv->drrs.mutex); 7814 7815 cancel_delayed_work_sync(&dev_priv->drrs.work); 7816 } 7817 7818 static void intel_edp_drrs_downclock_work(struct work_struct *work) 7819 { 7820 struct drm_i915_private *dev_priv = 7821 container_of(work, typeof(*dev_priv), drrs.work.work); 7822 struct intel_dp *intel_dp; 7823 7824 mutex_lock(&dev_priv->drrs.mutex); 7825 7826 intel_dp = dev_priv->drrs.dp; 7827 7828 if (!intel_dp) 7829 goto unlock; 7830 7831 /* 7832 * The delayed work can race with an invalidate hence we need to 7833 * recheck. 7834 */ 7835 7836 if (dev_priv->drrs.busy_frontbuffer_bits) 7837 goto unlock; 7838 7839 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 7840 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7841 7842 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7843 intel_dp->attached_connector->panel.downclock_mode->vrefresh); 7844 } 7845 7846 unlock: 7847 mutex_unlock(&dev_priv->drrs.mutex); 7848 } 7849 7850 /** 7851 * intel_edp_drrs_invalidate - Disable Idleness DRRS 7852 * @dev_priv: i915 device 7853 * @frontbuffer_bits: frontbuffer plane tracking bits 7854 * 7855 * This function gets called everytime rendering on the given planes start. 7856 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). 7857 * 7858 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7859 */ 7860 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 7861 unsigned int frontbuffer_bits) 7862 { 7863 struct drm_crtc *crtc; 7864 enum pipe pipe; 7865 7866 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7867 return; 7868 7869 cancel_delayed_work(&dev_priv->drrs.work); 7870 7871 mutex_lock(&dev_priv->drrs.mutex); 7872 if (!dev_priv->drrs.dp) { 7873 mutex_unlock(&dev_priv->drrs.mutex); 7874 return; 7875 } 7876 7877 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7878 pipe = to_intel_crtc(crtc)->pipe; 7879 7880 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7881 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 7882 7883 /* invalidate means busy screen hence upclock */ 7884 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7885 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7886 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7887 7888 mutex_unlock(&dev_priv->drrs.mutex); 7889 } 7890 7891 /** 7892 * intel_edp_drrs_flush - Restart Idleness DRRS 7893 * @dev_priv: i915 device 7894 * @frontbuffer_bits: frontbuffer plane tracking bits 7895 * 7896 * This function gets called every time rendering on the given planes has 7897 * completed or flip on a crtc is completed. So DRRS should be upclocked 7898 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, 7899 * if no other planes are dirty. 7900 * 7901 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7902 */ 7903 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 7904 unsigned int frontbuffer_bits) 7905 { 7906 struct drm_crtc *crtc; 7907 enum pipe pipe; 7908 7909 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7910 return; 7911 7912 cancel_delayed_work(&dev_priv->drrs.work); 7913 7914 mutex_lock(&dev_priv->drrs.mutex); 7915 if (!dev_priv->drrs.dp) { 7916 mutex_unlock(&dev_priv->drrs.mutex); 7917 return; 7918 } 7919 7920 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; 7921 pipe = to_intel_crtc(crtc)->pipe; 7922 7923 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7924 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 7925 7926 /* flush means busy screen hence upclock */ 7927 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7928 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7929 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); 7930 7931 /* 7932 * flush also means no more activity hence schedule downclock, if all 7933 * other fbs are quiescent too 7934 */ 7935 if (!dev_priv->drrs.busy_frontbuffer_bits) 7936 schedule_delayed_work(&dev_priv->drrs.work, 7937 msecs_to_jiffies(1000)); 7938 mutex_unlock(&dev_priv->drrs.mutex); 7939 } 7940 7941 /** 7942 * DOC: Display Refresh Rate Switching (DRRS) 7943 * 7944 * Display Refresh Rate Switching (DRRS) is a power conservation feature 7945 * which enables swtching between low and high refresh rates, 7946 * dynamically, based on the usage scenario. This feature is applicable 7947 * for internal panels. 7948 * 7949 * Indication that the panel supports DRRS is given by the panel EDID, which 7950 * would list multiple refresh rates for one resolution. 7951 * 7952 * DRRS is of 2 types - static and seamless. 7953 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 7954 * (may appear as a blink on screen) and is used in dock-undock scenario. 7955 * Seamless DRRS involves changing RR without any visual effect to the user 7956 * and can be used during normal system usage. This is done by programming 7957 * certain registers. 7958 * 7959 * Support for static/seamless DRRS may be indicated in the VBT based on 7960 * inputs from the panel spec. 7961 * 7962 * DRRS saves power by switching to low RR based on usage scenarios. 7963 * 7964 * The implementation is based on frontbuffer tracking implementation. When 7965 * there is a disturbance on the screen triggered by user activity or a periodic 7966 * system activity, DRRS is disabled (RR is changed to high RR). When there is 7967 * no movement on screen, after a timeout of 1 second, a switch to low RR is 7968 * made. 7969 * 7970 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 7971 * and intel_edp_drrs_flush() are called. 7972 * 7973 * DRRS can be further extended to support other internal panels and also 7974 * the scenario of video playback wherein RR is set based on the rate 7975 * requested by userspace. 7976 */ 7977 7978 /** 7979 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7980 * @connector: eDP connector 7981 * @fixed_mode: preferred mode of panel 7982 * 7983 * This function is called only once at driver load to initialize basic 7984 * DRRS stuff. 7985 * 7986 * Returns: 7987 * Downclock mode if panel supports it, else return NULL. 7988 * DRRS support is determined by the presence of downclock mode (apart 7989 * from VBT setting). 7990 */ 7991 static struct drm_display_mode * 7992 intel_dp_drrs_init(struct intel_connector *connector, 7993 struct drm_display_mode *fixed_mode) 7994 { 7995 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 7996 struct drm_display_mode *downclock_mode = NULL; 7997 7998 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 7999 mutex_init(&dev_priv->drrs.mutex); 8000 8001 if (INTEL_GEN(dev_priv) <= 6) { 8002 drm_dbg_kms(&dev_priv->drm, 8003 "DRRS supported for Gen7 and above\n"); 8004 return NULL; 8005 } 8006 8007 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 8008 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); 8009 return NULL; 8010 } 8011 8012 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); 8013 if (!downclock_mode) { 8014 drm_dbg_kms(&dev_priv->drm, 8015 "Downclock mode is not found. DRRS not supported\n"); 8016 return NULL; 8017 } 8018 8019 dev_priv->drrs.type = dev_priv->vbt.drrs_type; 8020 8021 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; 8022 drm_dbg_kms(&dev_priv->drm, 8023 "seamless DRRS supported for eDP panel.\n"); 8024 return downclock_mode; 8025 } 8026 8027 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 8028 struct intel_connector *intel_connector) 8029 { 8030 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 8031 struct drm_device *dev = &dev_priv->drm; 8032 struct drm_connector *connector = &intel_connector->base; 8033 struct drm_display_mode *fixed_mode = NULL; 8034 struct drm_display_mode *downclock_mode = NULL; 8035 bool has_dpcd; 8036 enum pipe pipe = INVALID_PIPE; 8037 intel_wakeref_t wakeref; 8038 struct edid *edid; 8039 8040 if (!intel_dp_is_edp(intel_dp)) 8041 return true; 8042 8043 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); 8044 8045 /* 8046 * On IBX/CPT we may get here with LVDS already registered. Since the 8047 * driver uses the only internal power sequencer available for both 8048 * eDP and LVDS bail out early in this case to prevent interfering 8049 * with an already powered-on LVDS power sequencer. 8050 */ 8051 if (intel_get_lvds_encoder(dev_priv)) { 8052 drm_WARN_ON(dev, 8053 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 8054 drm_info(&dev_priv->drm, 8055 "LVDS was detected, not registering eDP\n"); 8056 8057 return false; 8058 } 8059 8060 with_pps_lock(intel_dp, wakeref) { 8061 intel_dp_init_panel_power_timestamps(intel_dp); 8062 intel_dp_pps_init(intel_dp); 8063 intel_edp_panel_vdd_sanitize(intel_dp); 8064 } 8065 8066 /* Cache DPCD and EDID for edp. */ 8067 has_dpcd = intel_edp_init_dpcd(intel_dp); 8068 8069 if (!has_dpcd) { 8070 /* if this fails, presume the device is a ghost */ 8071 drm_info(&dev_priv->drm, 8072 "failed to retrieve link info, disabling eDP\n"); 8073 goto out_vdd_off; 8074 } 8075 8076 mutex_lock(&dev->mode_config.mutex); 8077 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 8078 if (edid) { 8079 if (drm_add_edid_modes(connector, edid)) { 8080 drm_connector_update_edid_property(connector, edid); 8081 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 8082 } else { 8083 kfree(edid); 8084 edid = ERR_PTR(-EINVAL); 8085 } 8086 } else { 8087 edid = ERR_PTR(-ENOENT); 8088 } 8089 intel_connector->edid = edid; 8090 8091 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 8092 if (fixed_mode) 8093 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 8094 8095 /* fallback to VBT if available for eDP */ 8096 if (!fixed_mode) 8097 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 8098 mutex_unlock(&dev->mode_config.mutex); 8099 8100 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 8101 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 8102 register_reboot_notifier(&intel_dp->edp_notifier); 8103 8104 /* 8105 * Figure out the current pipe for the initial backlight setup. 8106 * If the current pipe isn't valid, try the PPS pipe, and if that 8107 * fails just assume pipe A. 8108 */ 8109 pipe = vlv_active_pipe(intel_dp); 8110 8111 if (pipe != PIPE_A && pipe != PIPE_B) 8112 pipe = intel_dp->pps_pipe; 8113 8114 if (pipe != PIPE_A && pipe != PIPE_B) 8115 pipe = PIPE_A; 8116 8117 drm_dbg_kms(&dev_priv->drm, 8118 "using pipe %c for initial backlight setup\n", 8119 pipe_name(pipe)); 8120 } 8121 8122 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 8123 intel_connector->panel.backlight.power = intel_edp_backlight_power; 8124 intel_panel_setup_backlight(connector, pipe); 8125 8126 if (fixed_mode) { 8127 drm_connector_set_panel_orientation_with_quirk(connector, 8128 dev_priv->vbt.orientation, 8129 fixed_mode->hdisplay, fixed_mode->vdisplay); 8130 } 8131 8132 return true; 8133 8134 out_vdd_off: 8135 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 8136 /* 8137 * vdd might still be enabled do to the delayed vdd off. 8138 * Make sure vdd is actually turned off here. 8139 */ 8140 with_pps_lock(intel_dp, wakeref) 8141 edp_panel_vdd_off_sync(intel_dp); 8142 8143 return false; 8144 } 8145 8146 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 8147 { 8148 struct intel_connector *intel_connector; 8149 struct drm_connector *connector; 8150 8151 intel_connector = container_of(work, typeof(*intel_connector), 8152 modeset_retry_work); 8153 connector = &intel_connector->base; 8154 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 8155 connector->name); 8156 8157 /* Grab the locks before changing connector property*/ 8158 mutex_lock(&connector->dev->mode_config.mutex); 8159 /* Set connector link status to BAD and send a Uevent to notify 8160 * userspace to do a modeset. 8161 */ 8162 drm_connector_set_link_status_property(connector, 8163 DRM_MODE_LINK_STATUS_BAD); 8164 mutex_unlock(&connector->dev->mode_config.mutex); 8165 /* Send Hotplug uevent so userspace can reprobe */ 8166 drm_kms_helper_hotplug_event(connector->dev); 8167 } 8168 8169 bool 8170 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 8171 struct intel_connector *intel_connector) 8172 { 8173 struct drm_connector *connector = &intel_connector->base; 8174 struct intel_dp *intel_dp = &intel_dig_port->dp; 8175 struct intel_encoder *intel_encoder = &intel_dig_port->base; 8176 struct drm_device *dev = intel_encoder->base.dev; 8177 struct drm_i915_private *dev_priv = to_i915(dev); 8178 enum port port = intel_encoder->port; 8179 enum phy phy = intel_port_to_phy(dev_priv, port); 8180 int type; 8181 8182 /* Initialize the work for modeset in case of link train failure */ 8183 INIT_WORK(&intel_connector->modeset_retry_work, 8184 intel_dp_modeset_retry_work_fn); 8185 8186 if (drm_WARN(dev, intel_dig_port->max_lanes < 1, 8187 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 8188 intel_dig_port->max_lanes, intel_encoder->base.base.id, 8189 intel_encoder->base.name)) 8190 return false; 8191 8192 intel_dp_set_source_rates(intel_dp); 8193 8194 intel_dp->reset_link_params = true; 8195 intel_dp->pps_pipe = INVALID_PIPE; 8196 intel_dp->active_pipe = INVALID_PIPE; 8197 8198 /* Preserve the current hw state. */ 8199 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 8200 intel_dp->attached_connector = intel_connector; 8201 8202 if (intel_dp_is_port_edp(dev_priv, port)) { 8203 /* 8204 * Currently we don't support eDP on TypeC ports, although in 8205 * theory it could work on TypeC legacy ports. 8206 */ 8207 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 8208 type = DRM_MODE_CONNECTOR_eDP; 8209 } else { 8210 type = DRM_MODE_CONNECTOR_DisplayPort; 8211 } 8212 8213 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 8214 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 8215 8216 /* 8217 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 8218 * for DP the encoder type can be set by the caller to 8219 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 8220 */ 8221 if (type == DRM_MODE_CONNECTOR_eDP) 8222 intel_encoder->type = INTEL_OUTPUT_EDP; 8223 8224 /* eDP only on port B and/or C on vlv/chv */ 8225 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 8226 IS_CHERRYVIEW(dev_priv)) && 8227 intel_dp_is_edp(intel_dp) && 8228 port != PORT_B && port != PORT_C)) 8229 return false; 8230 8231 drm_dbg_kms(&dev_priv->drm, 8232 "Adding %s connector on [ENCODER:%d:%s]\n", 8233 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", 8234 intel_encoder->base.base.id, intel_encoder->base.name); 8235 8236 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 8237 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 8238 8239 if (!HAS_GMCH(dev_priv)) 8240 connector->interlace_allowed = true; 8241 connector->doublescan_allowed = 0; 8242 8243 if (INTEL_GEN(dev_priv) >= 11) 8244 connector->ycbcr_420_allowed = true; 8245 8246 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 8247 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 8248 8249 intel_dp_aux_init(intel_dp); 8250 8251 intel_connector_attach_encoder(intel_connector, intel_encoder); 8252 8253 if (HAS_DDI(dev_priv)) 8254 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 8255 else 8256 intel_connector->get_hw_state = intel_connector_get_hw_state; 8257 8258 /* init MST on ports that can support it */ 8259 intel_dp_mst_encoder_init(intel_dig_port, 8260 intel_connector->base.base.id); 8261 8262 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 8263 intel_dp_aux_fini(intel_dp); 8264 intel_dp_mst_encoder_cleanup(intel_dig_port); 8265 goto fail; 8266 } 8267 8268 intel_dp_add_properties(intel_dp, connector); 8269 8270 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 8271 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); 8272 if (ret) 8273 drm_dbg_kms(&dev_priv->drm, 8274 "HDCP init failed, skipping.\n"); 8275 } 8276 8277 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 8278 * 0xd. Failure to do so will result in spurious interrupts being 8279 * generated on the port when a cable is not attached. 8280 */ 8281 if (IS_G45(dev_priv)) { 8282 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 8283 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 8284 (temp & ~0xf) | 0xd); 8285 } 8286 8287 return true; 8288 8289 fail: 8290 drm_connector_cleanup(connector); 8291 8292 return false; 8293 } 8294 8295 bool intel_dp_init(struct drm_i915_private *dev_priv, 8296 i915_reg_t output_reg, 8297 enum port port) 8298 { 8299 struct intel_digital_port *intel_dig_port; 8300 struct intel_encoder *intel_encoder; 8301 struct drm_encoder *encoder; 8302 struct intel_connector *intel_connector; 8303 8304 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 8305 if (!intel_dig_port) 8306 return false; 8307 8308 intel_connector = intel_connector_alloc(); 8309 if (!intel_connector) 8310 goto err_connector_alloc; 8311 8312 intel_encoder = &intel_dig_port->base; 8313 encoder = &intel_encoder->base; 8314 8315 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 8316 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 8317 "DP %c", port_name(port))) 8318 goto err_encoder_init; 8319 8320 intel_encoder->hotplug = intel_dp_hotplug; 8321 intel_encoder->compute_config = intel_dp_compute_config; 8322 intel_encoder->get_hw_state = intel_dp_get_hw_state; 8323 intel_encoder->get_config = intel_dp_get_config; 8324 intel_encoder->update_pipe = intel_panel_update_backlight; 8325 intel_encoder->suspend = intel_dp_encoder_suspend; 8326 if (IS_CHERRYVIEW(dev_priv)) { 8327 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 8328 intel_encoder->pre_enable = chv_pre_enable_dp; 8329 intel_encoder->enable = vlv_enable_dp; 8330 intel_encoder->disable = vlv_disable_dp; 8331 intel_encoder->post_disable = chv_post_disable_dp; 8332 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 8333 } else if (IS_VALLEYVIEW(dev_priv)) { 8334 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 8335 intel_encoder->pre_enable = vlv_pre_enable_dp; 8336 intel_encoder->enable = vlv_enable_dp; 8337 intel_encoder->disable = vlv_disable_dp; 8338 intel_encoder->post_disable = vlv_post_disable_dp; 8339 } else { 8340 intel_encoder->pre_enable = g4x_pre_enable_dp; 8341 intel_encoder->enable = g4x_enable_dp; 8342 intel_encoder->disable = g4x_disable_dp; 8343 intel_encoder->post_disable = g4x_post_disable_dp; 8344 } 8345 8346 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 8347 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) 8348 intel_dig_port->dp.set_link_train = cpt_set_link_train; 8349 else 8350 intel_dig_port->dp.set_link_train = g4x_set_link_train; 8351 8352 if (IS_CHERRYVIEW(dev_priv)) 8353 intel_dig_port->dp.set_signal_levels = chv_set_signal_levels; 8354 else if (IS_VALLEYVIEW(dev_priv)) 8355 intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels; 8356 else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 8357 intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels; 8358 else if (IS_GEN(dev_priv, 6) && port == PORT_A) 8359 intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; 8360 else 8361 intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels; 8362 8363 intel_dig_port->dp.output_reg = output_reg; 8364 intel_dig_port->max_lanes = 4; 8365 intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); 8366 intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); 8367 8368 intel_encoder->type = INTEL_OUTPUT_DP; 8369 intel_encoder->power_domain = intel_port_to_power_domain(port); 8370 if (IS_CHERRYVIEW(dev_priv)) { 8371 if (port == PORT_D) 8372 intel_encoder->pipe_mask = BIT(PIPE_C); 8373 else 8374 intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); 8375 } else { 8376 intel_encoder->pipe_mask = ~0; 8377 } 8378 intel_encoder->cloneable = 0; 8379 intel_encoder->port = port; 8380 8381 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 8382 8383 if (HAS_GMCH(dev_priv)) { 8384 if (IS_GM45(dev_priv)) 8385 intel_dig_port->connected = gm45_digital_port_connected; 8386 else 8387 intel_dig_port->connected = g4x_digital_port_connected; 8388 } else { 8389 if (port == PORT_A) 8390 intel_dig_port->connected = ilk_digital_port_connected; 8391 else 8392 intel_dig_port->connected = ibx_digital_port_connected; 8393 } 8394 8395 if (port != PORT_A) 8396 intel_infoframe_init(intel_dig_port); 8397 8398 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 8399 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 8400 goto err_init_connector; 8401 8402 return true; 8403 8404 err_init_connector: 8405 drm_encoder_cleanup(encoder); 8406 err_encoder_init: 8407 kfree(intel_connector); 8408 err_connector_alloc: 8409 kfree(intel_dig_port); 8410 return false; 8411 } 8412 8413 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 8414 { 8415 struct intel_encoder *encoder; 8416 8417 for_each_intel_encoder(&dev_priv->drm, encoder) { 8418 struct intel_dp *intel_dp; 8419 8420 if (encoder->type != INTEL_OUTPUT_DDI) 8421 continue; 8422 8423 intel_dp = enc_to_intel_dp(encoder); 8424 8425 if (!intel_dp->can_mst) 8426 continue; 8427 8428 if (intel_dp->is_mst) 8429 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 8430 } 8431 } 8432 8433 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 8434 { 8435 struct intel_encoder *encoder; 8436 8437 for_each_intel_encoder(&dev_priv->drm, encoder) { 8438 struct intel_dp *intel_dp; 8439 int ret; 8440 8441 if (encoder->type != INTEL_OUTPUT_DDI) 8442 continue; 8443 8444 intel_dp = enc_to_intel_dp(encoder); 8445 8446 if (!intel_dp->can_mst) 8447 continue; 8448 8449 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 8450 true); 8451 if (ret) { 8452 intel_dp->is_mst = false; 8453 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 8454 false); 8455 } 8456 } 8457 } 8458