/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which provides more link rates.
 * Below we only list the fixed rates, excluding the variable ones.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
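
/*
 * A worked example of the m2 encoding above (illustrative only, not used
 * by the code): for the 162000 entry, m2 = 0x819999a decodes as
 *
 *	m2_int      = 0x819999a >> 22              = 32
 *	m2_fraction = 0x819999a & ((1 << 22) - 1)  = 1677722
 *
 * so the effective divider is 32 + 1677722 / 2^22, roughly 32.4.
 */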

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* Update sink rates from DPCD */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get the length of the rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
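
/*
 * Illustrative example for intel_dp_rate_limit_len() (not used by the
 * code): with rates[] = { 162000, 270000, 540000 } and max_rate = 324000,
 * the loop above first finds rates[1] = 270000 <= 324000 and returns 2,
 * i.e. only the first two entries remain usable.
 */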

/* Get the length of the common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for the bits to bytes conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to a max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, the max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
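
/*
 * A quick sanity example for the bandwidth helpers above (illustrative
 * numbers only): a 2560x1440@60 mode with a 241500 kHz pixel clock at
 * 24 bpp needs intel_dp_link_required(241500, 24) = 724500 kB/s, while
 * an HBR2 x4 link provides intel_dp_max_data_rate(540000, 4) = 2160000
 * kB/s, so the mode fits comfortably.
 */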

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}
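
/*
 * intersect_rates() above is a classic two-pointer merge over two sorted
 * arrays. Illustrative example (not used by the code): source
 * { 162000, 270000, 540000 } intersected with sink
 * { 162000, 270000, 324000 } yields common rates { 162000, 270000 }
 * and returns k = 2.
 */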

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
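
/*
 * Rough numbers for the FEC overhead conversion above (an illustrative
 * sketch): FEC costs about 2.85% of the link bandwidth, so a 148500 kHz
 * mode clock becomes 148500 * 1000000 / 972261 = 152736 kHz of
 * effective "FEC clock" that the link budget has to cover.
 */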

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than the smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format so multiply by 16; for Gen 11,
	 * the fractional part is 0.
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account the max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
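
/*
 * Worked example for the two DSC helpers above (illustrative numbers,
 * with an assumed sink max slice width of 2560): a 3840-wide mode with a
 * 533250 kHz pixel clock stays under DP_DSC_PEAK_PIXEL_RATE, so
 * min_slice_count = DIV_ROUND_UP(533250, 340000) = 2; the width check
 * gives DIV_ROUND_UP(3840, 2560) = 2 as well, and the first valid slice
 * count >= 2 is 2. On the bpp side, a computed value of 12 is returned
 * as 12 << 4 = 192 in U6.4 format.
 */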

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
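
/*
 * A minimal usage sketch for with_pps_lock() (illustrative only; real
 * callers appear further down, e.g. edp_notify_handler()):
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... access PPS registers ...
 *	}
 *
 * The loop body runs exactly once: pps_lock() returns a non-zero wakeref
 * and pps_unlock() returns 0, which terminates the for-loop while
 * releasing the mutex and the power domain reference.
 */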

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer for this port yet.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
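
/*
 * Illustrative walk-through of vlv_find_free_pps() above (not used by
 * the code): pipes starts as 0b11 (PPS A and B both free). If another
 * eDP port already owns pps_pipe == PIPE_A, the mask becomes 0b10 and
 * ffs(0b10) - 1 = PIPE_B is returned; if both sequencers are taken, the
 * mask ends up 0 and INVALID_PIPE is returned.
 */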

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power and guarantee T12 timing.
 * Only applicable when the panel PM state is not being tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* Writing 0x1F to PP_DIV_REG sets the max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}
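
/*
 * For a sense of scale (hypothetical numbers, not taken from the code):
 * if rawclk_freq were 24000 kHz (a 24 MHz raw clock), the divider above
 * would be DIV_ROUND_CLOSEST(24000, 2000) = 12, giving an AUX bit clock
 * of roughly 24 MHz / 12 = 2 MHz as intended.
 */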

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * DP aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * Per DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2,
			 * a 400us delay is required for errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement, so skip to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened, so we return -EBUSY so
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
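
/*
 * Example header layout (illustrative only): a native AUX read
 * (msg->request = DP_AUX_NATIVE_READ = 0x9) of 16 bytes from DPCD
 * address 0x00000 packs as txbuf[] = { 0x90, 0x00, 0x00, 0x0f }:
 * the request sits in the top nibble of byte 0, the 20-bit address
 * fills the remaining nibbles, and size - 1 goes in the last byte.
 */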

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
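
/*
 * With KMS debugging enabled, the above typically logs something like
 * (illustrative values for an HBR2-capable source and sink):
 *
 *	source rates: 162000, 270000, 540000
 *	sink rates: 162000, 270000, 540000
 *	common rates: 162000, 270000, 540000
 */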
*/ 1867 1868 if (!drm_debug_enabled(DRM_UT_KMS)) 1869 return; 1870 1871 snprintf_int_array(str, sizeof(str), 1872 intel_dp->source_rates, intel_dp->num_source_rates); 1873 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1874 1875 snprintf_int_array(str, sizeof(str), 1876 intel_dp->sink_rates, intel_dp->num_sink_rates); 1877 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1878 1879 snprintf_int_array(str, sizeof(str), 1880 intel_dp->common_rates, intel_dp->num_common_rates); 1881 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1882 } 1883 1884 int 1885 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1886 { 1887 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1888 int len; 1889 1890 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1891 if (drm_WARN_ON(&i915->drm, len <= 0)) 1892 return 162000; 1893 1894 return intel_dp->common_rates[len - 1]; 1895 } 1896 1897 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1898 { 1899 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1900 int i = intel_dp_rate_index(intel_dp->sink_rates, 1901 intel_dp->num_sink_rates, rate); 1902 1903 if (drm_WARN_ON(&i915->drm, i < 0)) 1904 i = 0; 1905 1906 return i; 1907 } 1908 1909 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1910 u8 *link_bw, u8 *rate_select) 1911 { 1912 /* eDP 1.4 rate select method. */ 1913 if (intel_dp->use_rate_select) { 1914 *link_bw = 0; 1915 *rate_select = 1916 intel_dp_rate_select(intel_dp, port_clock); 1917 } else { 1918 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1919 *rate_select = 0; 1920 } 1921 } 1922 1923 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1924 const struct intel_crtc_state *pipe_config) 1925 { 1926 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1927 1928 /* On TGL, FEC is supported on all Pipes */ 1929 if (INTEL_GEN(dev_priv) >= 12) 1930 return true; 1931 1932 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) 1933 return true; 1934 1935 return false; 1936 } 1937 1938 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1939 const struct intel_crtc_state *pipe_config) 1940 { 1941 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1942 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1943 } 1944 1945 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1946 const struct intel_crtc_state *crtc_state) 1947 { 1948 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1949 1950 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) 1951 return false; 1952 1953 return intel_dsc_source_support(encoder, crtc_state) && 1954 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1955 } 1956 1957 static int intel_dp_compute_bpp(struct intel_dp *intel_dp, 1958 struct intel_crtc_state *pipe_config) 1959 { 1960 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1961 struct intel_connector *intel_connector = intel_dp->attached_connector; 1962 int bpp, bpc; 1963 1964 bpp = pipe_config->pipe_bpp; 1965 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); 1966 1967 if (bpc > 0) 1968 bpp = min(bpp, 3*bpc); 1969 1970 if (intel_dp_is_edp(intel_dp)) { 1971 /* Get bpp from VBT only for panels that don't have bpp in EDID */ 1972 if (intel_connector->base.display_info.bpc == 0 && 1973 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 1974 drm_dbg_kms(&dev_priv->drm, 1975 "clamping bpp for eDP panel to BIOS-provided %i\n", 1976 dev_priv->vbt.edp.bpp); 1977 bpp = dev_priv->vbt.edp.bpp; 1978 }
1979 } 1980 1981 return bpp; 1982 } 1983 1984 /* Adjust link config limits based on compliance test requests. */ 1985 void 1986 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1987 struct intel_crtc_state *pipe_config, 1988 struct link_config_limits *limits) 1989 { 1990 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1991 1992 /* For DP Compliance we override the computed bpp for the pipe */ 1993 if (intel_dp->compliance.test_data.bpc != 0) { 1994 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1995 1996 limits->min_bpp = limits->max_bpp = bpp; 1997 pipe_config->dither_force_disable = bpp == 6 * 3; 1998 1999 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 2000 } 2001 2002 /* Use values requested by Compliance Test Request */ 2003 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 2004 int index; 2005 2006 /* Validate the compliance test data since max values 2007 * might have changed due to link train fallback. 2008 */ 2009 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 2010 intel_dp->compliance.test_lane_count)) { 2011 index = intel_dp_rate_index(intel_dp->common_rates, 2012 intel_dp->num_common_rates, 2013 intel_dp->compliance.test_link_rate); 2014 if (index >= 0) 2015 limits->min_clock = limits->max_clock = index; 2016 limits->min_lane_count = limits->max_lane_count = 2017 intel_dp->compliance.test_lane_count; 2018 } 2019 } 2020 } 2021 2022 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 2023 { 2024 /* 2025 * The bpp value was computed assuming RGB output. For YCbCr 4:2:0 2026 * output the effective number of bits per pixel is half the RGB 2027 * value, so halve it here. 2028 */ 2029 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2030 bpp /= 2; 2031 2032 return bpp; 2033 } 2034 2035 /* Optimize link config in order: max bpp, min clock, min lanes */ 2036 static int 2037 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2038 struct intel_crtc_state *pipe_config, 2039 const struct link_config_limits *limits) 2040 { 2041 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2042 int bpp, clock, lane_count; 2043 int mode_rate, link_clock, link_avail; 2044 2045 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2046 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2047 2048 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2049 output_bpp); 2050 2051 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2052 for (lane_count = limits->min_lane_count; 2053 lane_count <= limits->max_lane_count; 2054 lane_count <<= 1) { 2055 link_clock = intel_dp->common_rates[clock]; 2056 link_avail = intel_dp_max_data_rate(link_clock, 2057 lane_count); 2058 2059 if (mode_rate <= link_avail) { 2060 pipe_config->lane_count = lane_count; 2061 pipe_config->pipe_bpp = bpp; 2062 pipe_config->port_clock = link_clock; 2063 2064 return 0; 2065 } 2066 } 2067 } 2068 } 2069 2070 return -EINVAL; 2071 } 2072 2073 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2074 { 2075 int i, num_bpc; 2076 u8 dsc_bpc[3] = {0}; 2077 2078 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2079 dsc_bpc); 2080 for (i = 0; i < num_bpc; i++) { 2081 if (dsc_max_bpc >= dsc_bpc[i]) 2082 return dsc_bpc[i] * 3; 2083 } 2084 2085 return 0; 2086 } 2087 2088 #define DSC_SUPPORTED_VERSION_MIN 1 2089 2090 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2091 struct intel_crtc_state
*crtc_state) 2092 { 2093 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2094 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2095 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2096 u8 line_buf_depth; 2097 int ret; 2098 2099 ret = intel_dsc_compute_params(encoder, crtc_state); 2100 if (ret) 2101 return ret; 2102 2103 /* 2104 * Slice Height of 8 works for all currently available panels. So start 2105 * with that if pic_height is an integral multiple of 8. Eventually add 2106 * logic to try multiple slice heights. 2107 */ 2108 if (vdsc_cfg->pic_height % 8 == 0) 2109 vdsc_cfg->slice_height = 8; 2110 else if (vdsc_cfg->pic_height % 4 == 0) 2111 vdsc_cfg->slice_height = 4; 2112 else 2113 vdsc_cfg->slice_height = 2; 2114 2115 vdsc_cfg->dsc_version_major = 2116 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2117 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2118 vdsc_cfg->dsc_version_minor = 2119 min(DSC_SUPPORTED_VERSION_MIN, 2120 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2121 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2122 2123 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2124 DP_DSC_RGB; 2125 2126 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2127 if (!line_buf_depth) { 2128 drm_dbg_kms(&i915->drm, 2129 "DSC Sink Line Buffer Depth invalid\n"); 2130 return -EINVAL; 2131 } 2132 2133 if (vdsc_cfg->dsc_version_minor == 2) 2134 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2135 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2136 else 2137 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2138 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2139 2140 vdsc_cfg->block_pred_enable = 2141 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2142 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2143 2144 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2145 } 2146 2147 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2148 struct intel_crtc_state *pipe_config, 2149 struct drm_connector_state *conn_state, 2150 struct link_config_limits *limits) 2151 { 2152 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2153 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2154 const struct drm_display_mode *adjusted_mode = 2155 &pipe_config->hw.adjusted_mode; 2156 u8 dsc_max_bpc; 2157 int pipe_bpp; 2158 int ret; 2159 2160 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2161 intel_dp_supports_fec(intel_dp, pipe_config); 2162 2163 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2164 return -EINVAL; 2165 2166 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2167 if (INTEL_GEN(dev_priv) >= 12) 2168 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2169 else 2170 dsc_max_bpc = min_t(u8, 10, 2171 conn_state->max_requested_bpc); 2172 2173 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2174 2175 /* Min Input BPC for ICL+ is 8 */ 2176 if (pipe_bpp < 8 * 3) { 2177 drm_dbg_kms(&dev_priv->drm, 2178 "No DSC support for less than 8bpc\n"); 2179 return -EINVAL; 2180 } 2181 2182 /* 2183 * For now enable DSC for max bpp, max link rate, max lane count. 2184 * Optimize this later for the minimum possible link rate/lane count 2185 * with DSC enabled for the requested mode. 
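 *
 * Note: the DPCD compressed bpp values consumed below are in units of
 * 1/16 of a bit per pixel, which is why they are shifted right by 4
 * before being compared against pipe_bpp (e.g. a sink reporting 256
 * supports at most 256 / 16 = 16 bpp of compressed output).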
2186 */ 2187 pipe_config->pipe_bpp = pipe_bpp; 2188 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2189 pipe_config->lane_count = limits->max_lane_count; 2190 2191 if (intel_dp_is_edp(intel_dp)) { 2192 pipe_config->dsc.compressed_bpp = 2193 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2194 pipe_config->pipe_bpp); 2195 pipe_config->dsc.slice_count = 2196 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2197 true); 2198 } else { 2199 u16 dsc_max_output_bpp; 2200 u8 dsc_dp_slice_count; 2201 2202 dsc_max_output_bpp = 2203 intel_dp_dsc_get_output_bpp(dev_priv, 2204 pipe_config->port_clock, 2205 pipe_config->lane_count, 2206 adjusted_mode->crtc_clock, 2207 adjusted_mode->crtc_hdisplay); 2208 dsc_dp_slice_count = 2209 intel_dp_dsc_get_slice_count(intel_dp, 2210 adjusted_mode->crtc_clock, 2211 adjusted_mode->crtc_hdisplay); 2212 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2213 drm_dbg_kms(&dev_priv->drm, 2214 "Compressed BPP/Slice Count not supported\n"); 2215 return -EINVAL; 2216 } 2217 pipe_config->dsc.compressed_bpp = min_t(u16, 2218 dsc_max_output_bpp >> 4, 2219 pipe_config->pipe_bpp); 2220 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2221 } 2222 /* 2223 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 2224 * is greater than the maximum Cdclock and the slice count is more 2225 * than one, we need to split the stream across 2 VDSC instances. 2226 */ 2227 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2228 if (pipe_config->dsc.slice_count > 1) { 2229 pipe_config->dsc.dsc_split = true; 2230 } else { 2231 drm_dbg_kms(&dev_priv->drm, 2232 "Cannot split stream to use 2 VDSC instances\n"); 2233 return -EINVAL; 2234 } 2235 } 2236 2237 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2238 if (ret < 0) { 2239 drm_dbg_kms(&dev_priv->drm, 2240 "Cannot compute valid DSC parameters for Input Bpp = %d " 2241 "Compressed BPP = %d\n", 2242 pipe_config->pipe_bpp, 2243 pipe_config->dsc.compressed_bpp); 2244 return ret; 2245 } 2246 2247 pipe_config->dsc.compression_enable = true; 2248 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2249 "Compressed Bpp = %d Slice Count = %d\n", 2250 pipe_config->pipe_bpp, 2251 pipe_config->dsc.compressed_bpp, 2252 pipe_config->dsc.slice_count); 2253 2254 return 0; 2255 } 2256 2257 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2258 { 2259 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2260 return 6 * 3; 2261 else 2262 return 8 * 3; 2263 } 2264 2265 static int 2266 intel_dp_compute_link_config(struct intel_encoder *encoder, 2267 struct intel_crtc_state *pipe_config, 2268 struct drm_connector_state *conn_state) 2269 { 2270 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2271 const struct drm_display_mode *adjusted_mode = 2272 &pipe_config->hw.adjusted_mode; 2273 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2274 struct link_config_limits limits; 2275 int common_len; 2276 int ret; 2277 2278 common_len = intel_dp_common_len_rate_limit(intel_dp, 2279 intel_dp->max_link_rate); 2280 2281 /* No common link rates between source and sink */ 2282 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2283 2284 limits.min_clock = 0; 2285 limits.max_clock = common_len - 1; 2286 2287 limits.min_lane_count = 1; 2288 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2289 2290 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2291 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2292 2293 if (intel_dp_is_edp(intel_dp)) { 2294 /* 2295 * Use
the maximum clock and number of lanes the eDP panel 2296 * advertises being capable of. The panels are generally 2297 * designed to support only a single clock and lane 2298 * configuration, and typically these values correspond to the 2299 * native resolution of the panel. 2300 */ 2301 limits.min_lane_count = limits.max_lane_count; 2302 limits.min_clock = limits.max_clock; 2303 } 2304 2305 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2306 2307 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 2308 "max rate %d max bpp %d pixel clock %iKHz\n", 2309 limits.max_lane_count, 2310 intel_dp->common_rates[limits.max_clock], 2311 limits.max_bpp, adjusted_mode->crtc_clock); 2312 2313 /* 2314 * Optimize for slow and wide. This is the place to add alternative 2315 * optimization policy. 2316 */ 2317 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2318 2319 /* enable compression if the mode doesn't fit available BW */ 2320 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 2321 if (ret || intel_dp->force_dsc_en) { 2322 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2323 conn_state, &limits); 2324 if (ret < 0) 2325 return ret; 2326 } 2327 2328 if (pipe_config->dsc.compression_enable) { 2329 drm_dbg_kms(&i915->drm, 2330 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2331 pipe_config->lane_count, pipe_config->port_clock, 2332 pipe_config->pipe_bpp, 2333 pipe_config->dsc.compressed_bpp); 2334 2335 drm_dbg_kms(&i915->drm, 2336 "DP link rate required %i available %i\n", 2337 intel_dp_link_required(adjusted_mode->crtc_clock, 2338 pipe_config->dsc.compressed_bpp), 2339 intel_dp_max_data_rate(pipe_config->port_clock, 2340 pipe_config->lane_count)); 2341 } else { 2342 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 2343 pipe_config->lane_count, pipe_config->port_clock, 2344 pipe_config->pipe_bpp); 2345 2346 drm_dbg_kms(&i915->drm, 2347 "DP link rate required %i available %i\n", 2348 intel_dp_link_required(adjusted_mode->crtc_clock, 2349 pipe_config->pipe_bpp), 2350 intel_dp_max_data_rate(pipe_config->port_clock, 2351 pipe_config->lane_count)); 2352 } 2353 return 0; 2354 } 2355 2356 static int 2357 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2358 struct intel_crtc_state *crtc_state, 2359 const struct drm_connector_state *conn_state) 2360 { 2361 struct drm_connector *connector = conn_state->connector; 2362 const struct drm_display_info *info = &connector->display_info; 2363 const struct drm_display_mode *adjusted_mode = 2364 &crtc_state->hw.adjusted_mode; 2365 2366 if (!drm_mode_is_420_only(info, adjusted_mode) || 2367 !intel_dp_get_colorimetry_status(intel_dp) || 2368 !connector->ycbcr_420_allowed) 2369 return 0; 2370 2371 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2372 2373 return intel_pch_panel_fitting(crtc_state, conn_state); 2374 } 2375 2376 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2377 const struct drm_connector_state *conn_state) 2378 { 2379 const struct intel_digital_connector_state *intel_conn_state = 2380 to_intel_digital_connector_state(conn_state); 2381 const struct drm_display_mode *adjusted_mode = 2382 &crtc_state->hw.adjusted_mode; 2383 2384 /* 2385 * Our YCbCr output is always limited range. 2386 * crtc_state->limited_color_range only applies to RGB, 2387 * and it must never be set for YCbCr or we risk setting 2388 * some conflicting bits in PIPECONF which will mess up 2389 * the colors on the monitor.
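 *
 * For RGB the "Broadcast RGB" property decides: in AUTO mode the
 * CTA-861 default quantization range applies, so e.g. a CEA mode such
 * as 1920x1080@60 defaults to limited range while typical IT/VESA
 * modes default to full range, as computed by
 * drm_default_rgb_quant_range() below.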
2390 */ 2391 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2392 return false; 2393 2394 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2395 /* 2396 * See: 2397 * CEA-861-E - 5.1 Default Encoding Parameters 2398 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2399 */ 2400 return crtc_state->pipe_bpp != 18 && 2401 drm_default_rgb_quant_range(adjusted_mode) == 2402 HDMI_QUANTIZATION_RANGE_LIMITED; 2403 } else { 2404 return intel_conn_state->broadcast_rgb == 2405 INTEL_BROADCAST_RGB_LIMITED; 2406 } 2407 } 2408 2409 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2410 enum port port) 2411 { 2412 if (IS_G4X(dev_priv)) 2413 return false; 2414 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2415 return false; 2416 2417 return true; 2418 } 2419 2420 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2421 const struct drm_connector_state *conn_state, 2422 struct drm_dp_vsc_sdp *vsc) 2423 { 2424 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2425 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2426 2427 /* 2428 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2429 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2430 * Colorimetry Format indication. 2431 */ 2432 vsc->revision = 0x5; 2433 vsc->length = 0x13; 2434 2435 /* DP 1.4a spec, Table 2-120 */ 2436 switch (crtc_state->output_format) { 2437 case INTEL_OUTPUT_FORMAT_YCBCR444: 2438 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2439 break; 2440 case INTEL_OUTPUT_FORMAT_YCBCR420: 2441 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2442 break; 2443 case INTEL_OUTPUT_FORMAT_RGB: 2444 default: 2445 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2446 } 2447 2448 switch (conn_state->colorspace) { 2449 case DRM_MODE_COLORIMETRY_BT709_YCC: 2450 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2451 break; 2452 case DRM_MODE_COLORIMETRY_XVYCC_601: 2453 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2454 break; 2455 case DRM_MODE_COLORIMETRY_XVYCC_709: 2456 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2457 break; 2458 case DRM_MODE_COLORIMETRY_SYCC_601: 2459 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2460 break; 2461 case DRM_MODE_COLORIMETRY_OPYCC_601: 2462 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2463 break; 2464 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2465 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2466 break; 2467 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2468 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2469 break; 2470 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2471 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2472 break; 2473 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2474 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2475 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2476 break; 2477 default: 2478 /* 2479 * RGB->YCBCR color conversion uses the BT.709 2480 * color space. 
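 * Hence, for YCbCr 4:2:0 output with no explicit colorspace property
 * set, advertise BT709_YCC in the VSC SDP rather than DEFAULT.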
2481 */ 2482 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2483 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2484 else 2485 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2486 break; 2487 } 2488 2489 vsc->bpc = crtc_state->pipe_bpp / 3; 2490 2491 /* only RGB pixelformat supports 6 bpc */ 2492 drm_WARN_ON(&dev_priv->drm, 2493 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2494 2495 /* all YCbCr are always limited range */ 2496 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2497 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2498 } 2499 2500 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2501 struct intel_crtc_state *crtc_state, 2502 const struct drm_connector_state *conn_state) 2503 { 2504 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 2505 2506 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 2507 if (crtc_state->has_psr) 2508 return; 2509 2510 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 2511 return; 2512 2513 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2514 vsc->sdp_type = DP_SDP_VSC; 2515 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2516 &crtc_state->infoframes.vsc); 2517 } 2518 2519 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 2520 const struct intel_crtc_state *crtc_state, 2521 const struct drm_connector_state *conn_state, 2522 struct drm_dp_vsc_sdp *vsc) 2523 { 2524 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2525 2526 vsc->sdp_type = DP_SDP_VSC; 2527 2528 if (dev_priv->psr.psr2_enabled) { 2529 if (dev_priv->psr.colorimetry_support && 2530 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2531 /* [PSR2, +Colorimetry] */ 2532 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2533 vsc); 2534 } else { 2535 /* 2536 * [PSR2, -Colorimetry] 2537 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2538 * 3D stereo + PSR/PSR2 + Y-coordinate. 2539 */ 2540 vsc->revision = 0x4; 2541 vsc->length = 0xe; 2542 } 2543 } else { 2544 /* 2545 * [PSR1] 2546 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2547 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2548 * higher). 
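 *
 * Summary of the VSC SDP header variants used in this function and in
 * intel_dp_compute_vsc_colorimetry():
 *	revision 0x2, length 0x8:  PSR1 (eDP 1.3+)
 *	revision 0x4, length 0xe:  PSR2 without colorimetry (eDP 1.4)
 *	revision 0x5, length 0x13: pixel encoding/colorimetry (DP 1.4a)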
2549 */ 2550 vsc->revision = 0x2; 2551 vsc->length = 0x8; 2552 } 2553 } 2554 2555 static void 2556 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2557 struct intel_crtc_state *crtc_state, 2558 const struct drm_connector_state *conn_state) 2559 { 2560 int ret; 2561 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2562 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2563 2564 if (!conn_state->hdr_output_metadata) 2565 return; 2566 2567 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2568 2569 if (ret) { 2570 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2571 return; 2572 } 2573 2574 crtc_state->infoframes.enable |= 2575 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2576 } 2577 2578 int 2579 intel_dp_compute_config(struct intel_encoder *encoder, 2580 struct intel_crtc_state *pipe_config, 2581 struct drm_connector_state *conn_state) 2582 { 2583 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2584 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2585 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2586 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2587 enum port port = encoder->port; 2588 struct intel_connector *intel_connector = intel_dp->attached_connector; 2589 struct intel_digital_connector_state *intel_conn_state = 2590 to_intel_digital_connector_state(conn_state); 2591 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2592 DP_DPCD_QUIRK_CONSTANT_N); 2593 int ret = 0, output_bpp; 2594 2595 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2596 pipe_config->has_pch_encoder = true; 2597 2598 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2599 2600 if (lspcon->active) 2601 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2602 else 2603 ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, 2604 conn_state); 2605 if (ret) 2606 return ret; 2607 2608 pipe_config->has_drrs = false; 2609 if (!intel_dp_port_has_audio(dev_priv, port)) 2610 pipe_config->has_audio = false; 2611 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2612 pipe_config->has_audio = intel_dp->has_audio; 2613 else 2614 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2615 2616 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2617 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2618 adjusted_mode); 2619 2620 if (HAS_GMCH(dev_priv)) 2621 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 2622 else 2623 ret = intel_pch_panel_fitting(pipe_config, conn_state); 2624 if (ret) 2625 return ret; 2626 } 2627 2628 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2629 return -EINVAL; 2630 2631 if (HAS_GMCH(dev_priv) && 2632 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2633 return -EINVAL; 2634 2635 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2636 return -EINVAL; 2637 2638 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2639 return -EINVAL; 2640 2641 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2642 if (ret < 0) 2643 return ret; 2644 2645 pipe_config->limited_color_range = 2646 intel_dp_limited_color_range(pipe_config, conn_state); 2647 2648 if (pipe_config->dsc.compression_enable) 2649 output_bpp = pipe_config->dsc.compressed_bpp; 2650 else 2651 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2652 2653 intel_link_compute_m_n(output_bpp, 2654 pipe_config->lane_count, 2655 
adjusted_mode->crtc_clock, 2656 pipe_config->port_clock, 2657 &pipe_config->dp_m_n, 2658 constant_n, pipe_config->fec_enable); 2659 2660 if (intel_connector->panel.downclock_mode != NULL && 2661 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2662 pipe_config->has_drrs = true; 2663 intel_link_compute_m_n(output_bpp, 2664 pipe_config->lane_count, 2665 intel_connector->panel.downclock_mode->clock, 2666 pipe_config->port_clock, 2667 &pipe_config->dp_m2_n2, 2668 constant_n, pipe_config->fec_enable); 2669 } 2670 2671 if (!HAS_DDI(dev_priv)) 2672 intel_dp_set_clock(encoder, pipe_config); 2673 2674 intel_psr_compute_config(intel_dp, pipe_config); 2675 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2676 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2677 2678 return 0; 2679 } 2680 2681 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2682 int link_rate, u8 lane_count, 2683 bool link_mst) 2684 { 2685 intel_dp->link_trained = false; 2686 intel_dp->link_rate = link_rate; 2687 intel_dp->lane_count = lane_count; 2688 intel_dp->link_mst = link_mst; 2689 } 2690 2691 static void intel_dp_prepare(struct intel_encoder *encoder, 2692 const struct intel_crtc_state *pipe_config) 2693 { 2694 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2695 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2696 enum port port = encoder->port; 2697 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2698 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2699 2700 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2701 pipe_config->lane_count, 2702 intel_crtc_has_type(pipe_config, 2703 INTEL_OUTPUT_DP_MST)); 2704 2705 /* 2706 * There are four kinds of DP registers: 2707 * 2708 * IBX PCH 2709 * SNB CPU 2710 * IVB CPU 2711 * CPT PCH 2712 * 2713 * IBX PCH and CPU are the same for almost everything, 2714 * except that the CPU DP PLL is configured in this 2715 * register 2716 * 2717 * CPT PCH is quite different, having many bits moved 2718 * to the TRANS_DP_CTL register instead. That 2719 * configuration happens (oddly) in ilk_pch_enable 2720 */ 2721 2722 /* Preserve the BIOS-computed detected bit. This is 2723 * supposed to be read-only. 
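 * (it records whether the BIOS found a device on the port at boot).
 * intel_dp->DP is rebuilt from scratch below, but this bit is carried
 * over so it survives all of our subsequent writes to the register.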
2724 */ 2725 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2726 2727 /* Handle DP bits in common between all three register formats */ 2728 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2729 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2730 2731 /* Split out the IBX/CPU vs CPT settings */ 2732 2733 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2734 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2735 intel_dp->DP |= DP_SYNC_HS_HIGH; 2736 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2737 intel_dp->DP |= DP_SYNC_VS_HIGH; 2738 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2739 2740 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2741 intel_dp->DP |= DP_ENHANCED_FRAMING; 2742 2743 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2744 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2745 u32 trans_dp; 2746 2747 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2748 2749 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2750 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2751 trans_dp |= TRANS_DP_ENH_FRAMING; 2752 else 2753 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2754 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2755 } else { 2756 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2757 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2758 2759 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2760 intel_dp->DP |= DP_SYNC_HS_HIGH; 2761 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2762 intel_dp->DP |= DP_SYNC_VS_HIGH; 2763 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2764 2765 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2766 intel_dp->DP |= DP_ENHANCED_FRAMING; 2767 2768 if (IS_CHERRYVIEW(dev_priv)) 2769 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2770 else 2771 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2772 } 2773 } 2774 2775 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2776 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2777 2778 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2779 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2780 2781 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2782 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2783 2784 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2785 2786 static void wait_panel_status(struct intel_dp *intel_dp, 2787 u32 mask, 2788 u32 value) 2789 { 2790 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2791 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2792 2793 lockdep_assert_held(&dev_priv->pps_mutex); 2794 2795 intel_pps_verify_state(intel_dp); 2796 2797 pp_stat_reg = _pp_stat_reg(intel_dp); 2798 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2799 2800 drm_dbg_kms(&dev_priv->drm, 2801 "mask %08x value %08x status %08x control %08x\n", 2802 mask, value, 2803 intel_de_read(dev_priv, pp_stat_reg), 2804 intel_de_read(dev_priv, pp_ctrl_reg)); 2805 2806 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2807 mask, value, 5000)) 2808 drm_err(&dev_priv->drm, 2809 "Panel status timeout: status %08x control %08x\n", 2810 intel_de_read(dev_priv, pp_stat_reg), 2811 intel_de_read(dev_priv, pp_ctrl_reg)); 2812 2813 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2814 } 2815 2816 static void wait_panel_on(struct intel_dp *intel_dp) 2817 { 2818 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2819 2820 drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); 2821 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2822 } 2823 
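/*
 * The wait_panel_ and wait_backlight_ helpers here pace the panel power
 * sequence using the delays read out of the VBT/PPS registers. The
 * rough mapping to the eDP spec timing parameters (T-names given for
 * orientation, as an assumed illustration) is:
 *
 *	panel_power_up_delay      T1+T3:   panel power on -> ready for AUX/video
 *	backlight_on_delay        T8:      valid video -> backlight on
 *	backlight_off_delay       T9:      backlight off -> end of valid video
 *	panel_power_cycle_delay   T11+T12: power off -> next power on
 */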
2824 static void wait_panel_off(struct intel_dp *intel_dp) 2825 { 2826 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2827 2828 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n"); 2829 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2830 } 2831 2832 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2833 { 2834 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2835 ktime_t panel_power_on_time; 2836 s64 panel_power_off_duration; 2837 2838 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n"); 2839 2840 /* Take the difference of the current time and the panel power off time 2841 * and then make the panel wait for t11_t12 if needed. */ 2842 panel_power_on_time = ktime_get_boottime(); 2843 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2844 2845 /* When we disable the VDD override bit last, we have to do the manual 2846 * wait. */ 2847 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2848 wait_remaining_ms_from_jiffies(jiffies, 2849 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2850 2851 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2852 } 2853 2854 static void wait_backlight_on(struct intel_dp *intel_dp) 2855 { 2856 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2857 intel_dp->backlight_on_delay); 2858 } 2859 2860 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2861 { 2862 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2863 intel_dp->backlight_off_delay); 2864 } 2865 2866 /* Read the current pp_control value, unlocking the register if it 2867 * is locked. 2868 */ 2869 2870 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2871 { 2872 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2873 u32 control; 2874 2875 lockdep_assert_held(&dev_priv->pps_mutex); 2876 2877 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 2878 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 2879 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2880 control &= ~PANEL_UNLOCK_MASK; 2881 control |= PANEL_UNLOCK_REGS; 2882 } 2883 return control; 2884 } 2885 2886 /* 2887 * Must be paired with edp_panel_vdd_off(). 2888 * Must hold pps_mutex around the whole on/off sequence. 2889 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
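 *
 * A typical sequence under the PPS lock (abridged from
 * intel_enable_dp() later in this file) is:
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		edp_panel_vdd_on(intel_dp);
 *		edp_panel_on(intel_dp);
 *		edp_panel_vdd_off(intel_dp, true);
 *	}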
2890 */ 2891 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2892 { 2893 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2894 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2895 u32 pp; 2896 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2897 bool need_to_disable = !intel_dp->want_panel_vdd; 2898 2899 lockdep_assert_held(&dev_priv->pps_mutex); 2900 2901 if (!intel_dp_is_edp(intel_dp)) 2902 return false; 2903 2904 cancel_delayed_work(&intel_dp->panel_vdd_work); 2905 intel_dp->want_panel_vdd = true; 2906 2907 if (edp_have_panel_vdd(intel_dp)) 2908 return need_to_disable; 2909 2910 intel_display_power_get(dev_priv, 2911 intel_aux_power_domain(dig_port)); 2912 2913 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 2914 dig_port->base.base.base.id, 2915 dig_port->base.base.name); 2916 2917 if (!edp_have_panel_power(intel_dp)) 2918 wait_panel_power_cycle(intel_dp); 2919 2920 pp = ilk_get_pp_control(intel_dp); 2921 pp |= EDP_FORCE_VDD; 2922 2923 pp_stat_reg = _pp_stat_reg(intel_dp); 2924 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2925 2926 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2927 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2928 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2929 intel_de_read(dev_priv, pp_stat_reg), 2930 intel_de_read(dev_priv, pp_ctrl_reg)); 2931 /* 2932 * If the panel wasn't on, delay before accessing aux channel 2933 */ 2934 if (!edp_have_panel_power(intel_dp)) { 2935 drm_dbg_kms(&dev_priv->drm, 2936 "[ENCODER:%d:%s] panel power wasn't enabled\n", 2937 dig_port->base.base.base.id, 2938 dig_port->base.base.name); 2939 msleep(intel_dp->panel_power_up_delay); 2940 } 2941 2942 return need_to_disable; 2943 } 2944 2945 /* 2946 * Must be paired with intel_edp_panel_vdd_off() or 2947 * intel_edp_panel_off(). 2948 * Nested calls to these functions are not allowed since 2949 * we drop the lock. Caller must use some higher level 2950 * locking to prevent nested calls from other threads. 
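 *
 * The usual pairing at disable time (see intel_disable_dp()) is:
 *
 *	intel_edp_panel_vdd_on(intel_dp);
 *	intel_edp_backlight_off(old_conn_state);
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 *	intel_edp_panel_off(intel_dp);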
2951 */ 2952 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2953 { 2954 intel_wakeref_t wakeref; 2955 bool vdd; 2956 2957 if (!intel_dp_is_edp(intel_dp)) 2958 return; 2959 2960 vdd = false; 2961 with_pps_lock(intel_dp, wakeref) 2962 vdd = edp_panel_vdd_on(intel_dp); 2963 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2964 dp_to_dig_port(intel_dp)->base.base.base.id, 2965 dp_to_dig_port(intel_dp)->base.base.name); 2966 } 2967 2968 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2969 { 2970 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2971 struct intel_digital_port *dig_port = 2972 dp_to_dig_port(intel_dp); 2973 u32 pp; 2974 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2975 2976 lockdep_assert_held(&dev_priv->pps_mutex); 2977 2978 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 2979 2980 if (!edp_have_panel_vdd(intel_dp)) 2981 return; 2982 2983 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 2984 dig_port->base.base.base.id, 2985 dig_port->base.base.name); 2986 2987 pp = ilk_get_pp_control(intel_dp); 2988 pp &= ~EDP_FORCE_VDD; 2989 2990 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2991 pp_stat_reg = _pp_stat_reg(intel_dp); 2992 2993 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2994 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2995 2996 /* Make sure sequencer is idle before allowing subsequent activity */ 2997 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2998 intel_de_read(dev_priv, pp_stat_reg), 2999 intel_de_read(dev_priv, pp_ctrl_reg)); 3000 3001 if ((pp & PANEL_POWER_ON) == 0) 3002 intel_dp->panel_power_off_time = ktime_get_boottime(); 3003 3004 intel_display_power_put_unchecked(dev_priv, 3005 intel_aux_power_domain(dig_port)); 3006 } 3007 3008 static void edp_panel_vdd_work(struct work_struct *__work) 3009 { 3010 struct intel_dp *intel_dp = 3011 container_of(to_delayed_work(__work), 3012 struct intel_dp, panel_vdd_work); 3013 intel_wakeref_t wakeref; 3014 3015 with_pps_lock(intel_dp, wakeref) { 3016 if (!intel_dp->want_panel_vdd) 3017 edp_panel_vdd_off_sync(intel_dp); 3018 } 3019 } 3020 3021 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 3022 { 3023 unsigned long delay; 3024 3025 /* 3026 * Queue the timer to fire a long time from now (relative to the power 3027 * down delay) to keep the panel power up across a sequence of 3028 * operations. 3029 */ 3030 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 3031 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 3032 } 3033 3034 /* 3035 * Must be paired with edp_panel_vdd_on(). 3036 * Must hold pps_mutex around the whole on/off sequence. 3037 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
3038 */ 3039 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 3040 { 3041 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3042 3043 lockdep_assert_held(&dev_priv->pps_mutex); 3044 3045 if (!intel_dp_is_edp(intel_dp)) 3046 return; 3047 3048 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 3049 dp_to_dig_port(intel_dp)->base.base.base.id, 3050 dp_to_dig_port(intel_dp)->base.base.name); 3051 3052 intel_dp->want_panel_vdd = false; 3053 3054 if (sync) 3055 edp_panel_vdd_off_sync(intel_dp); 3056 else 3057 edp_panel_vdd_schedule_off(intel_dp); 3058 } 3059 3060 static void edp_panel_on(struct intel_dp *intel_dp) 3061 { 3062 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3063 u32 pp; 3064 i915_reg_t pp_ctrl_reg; 3065 3066 lockdep_assert_held(&dev_priv->pps_mutex); 3067 3068 if (!intel_dp_is_edp(intel_dp)) 3069 return; 3070 3071 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 3072 dp_to_dig_port(intel_dp)->base.base.base.id, 3073 dp_to_dig_port(intel_dp)->base.base.name); 3074 3075 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 3076 "[ENCODER:%d:%s] panel power already on\n", 3077 dp_to_dig_port(intel_dp)->base.base.base.id, 3078 dp_to_dig_port(intel_dp)->base.base.name)) 3079 return; 3080 3081 wait_panel_power_cycle(intel_dp); 3082 3083 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3084 pp = ilk_get_pp_control(intel_dp); 3085 if (IS_GEN(dev_priv, 5)) { 3086 /* ILK workaround: disable reset around power sequence */ 3087 pp &= ~PANEL_POWER_RESET; 3088 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3089 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3090 } 3091 3092 pp |= PANEL_POWER_ON; 3093 if (!IS_GEN(dev_priv, 5)) 3094 pp |= PANEL_POWER_RESET; 3095 3096 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3097 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3098 3099 wait_panel_on(intel_dp); 3100 intel_dp->last_power_on = jiffies; 3101 3102 if (IS_GEN(dev_priv, 5)) { 3103 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 3104 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3105 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3106 } 3107 } 3108 3109 void intel_edp_panel_on(struct intel_dp *intel_dp) 3110 { 3111 intel_wakeref_t wakeref; 3112 3113 if (!intel_dp_is_edp(intel_dp)) 3114 return; 3115 3116 with_pps_lock(intel_dp, wakeref) 3117 edp_panel_on(intel_dp); 3118 } 3119 3120 3121 static void edp_panel_off(struct intel_dp *intel_dp) 3122 { 3123 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3124 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3125 u32 pp; 3126 i915_reg_t pp_ctrl_reg; 3127 3128 lockdep_assert_held(&dev_priv->pps_mutex); 3129 3130 if (!intel_dp_is_edp(intel_dp)) 3131 return; 3132 3133 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 3134 dig_port->base.base.base.id, dig_port->base.base.name); 3135 3136 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 3137 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 3138 dig_port->base.base.base.id, dig_port->base.base.name); 3139 3140 pp = ilk_get_pp_control(intel_dp); 3141 /* We need to switch off panel power _and_ force vdd, for otherwise some 3142 * panels get very unhappy and cease to work. 
*/ 3143 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 3144 EDP_BLC_ENABLE); 3145 3146 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3147 3148 intel_dp->want_panel_vdd = false; 3149 3150 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3151 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3152 3153 wait_panel_off(intel_dp); 3154 intel_dp->panel_power_off_time = ktime_get_boottime(); 3155 3156 /* We got a reference when we enabled the VDD. */ 3157 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3158 } 3159 3160 void intel_edp_panel_off(struct intel_dp *intel_dp) 3161 { 3162 intel_wakeref_t wakeref; 3163 3164 if (!intel_dp_is_edp(intel_dp)) 3165 return; 3166 3167 with_pps_lock(intel_dp, wakeref) 3168 edp_panel_off(intel_dp); 3169 } 3170 3171 /* Enable backlight in the panel power control. */ 3172 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 3173 { 3174 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3175 intel_wakeref_t wakeref; 3176 3177 /* 3178 * If we enable the backlight right away following a panel power 3179 * on, we may see slight flicker as the panel syncs with the eDP 3180 * link. So delay a bit to make sure the image is solid before 3181 * allowing it to appear. 3182 */ 3183 wait_backlight_on(intel_dp); 3184 3185 with_pps_lock(intel_dp, wakeref) { 3186 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3187 u32 pp; 3188 3189 pp = ilk_get_pp_control(intel_dp); 3190 pp |= EDP_BLC_ENABLE; 3191 3192 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3193 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3194 } 3195 } 3196 3197 /* Enable backlight PWM and backlight PP control. */ 3198 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3199 const struct drm_connector_state *conn_state) 3200 { 3201 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3202 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3203 3204 if (!intel_dp_is_edp(intel_dp)) 3205 return; 3206 3207 drm_dbg_kms(&i915->drm, "\n"); 3208 3209 intel_panel_enable_backlight(crtc_state, conn_state); 3210 _intel_edp_backlight_on(intel_dp); 3211 } 3212 3213 /* Disable backlight in the panel power control. */ 3214 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3215 { 3216 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3217 intel_wakeref_t wakeref; 3218 3219 if (!intel_dp_is_edp(intel_dp)) 3220 return; 3221 3222 with_pps_lock(intel_dp, wakeref) { 3223 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3224 u32 pp; 3225 3226 pp = ilk_get_pp_control(intel_dp); 3227 pp &= ~EDP_BLC_ENABLE; 3228 3229 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3230 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3231 } 3232 3233 intel_dp->last_backlight_off = jiffies; 3234 edp_wait_backlight_off(intel_dp); 3235 } 3236 3237 /* Disable backlight PP control and backlight PWM. */ 3238 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3239 { 3240 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3241 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3242 3243 if (!intel_dp_is_edp(intel_dp)) 3244 return; 3245 3246 drm_dbg_kms(&i915->drm, "\n"); 3247 3248 _intel_edp_backlight_off(intel_dp); 3249 intel_panel_disable_backlight(old_conn_state); 3250 } 3251 3252 /* 3253 * Hook for controlling the panel power control backlight through the bl_power 3254 * sysfs attribute. Take care to handle multiple calls. 
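 *
 * Userspace reaches this through the backlight class device registered
 * by this driver, e.g.:
 *
 *	# echo 4 > /sys/class/backlight/intel_backlight/bl_power   (blank)
 *	# echo 0 > /sys/class/backlight/intel_backlight/bl_power   (unblank)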
3255 */ 3256 static void intel_edp_backlight_power(struct intel_connector *connector, 3257 bool enable) 3258 { 3259 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3260 struct intel_dp *intel_dp = intel_attached_dp(connector); 3261 intel_wakeref_t wakeref; 3262 bool is_enabled; 3263 3264 is_enabled = false; 3265 with_pps_lock(intel_dp, wakeref) 3266 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3267 if (is_enabled == enable) 3268 return; 3269 3270 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", 3271 enable ? "enable" : "disable"); 3272 3273 if (enable) 3274 _intel_edp_backlight_on(intel_dp); 3275 else 3276 _intel_edp_backlight_off(intel_dp); 3277 } 3278 3279 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3280 { 3281 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3282 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3283 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3284 3285 I915_STATE_WARN(cur_state != state, 3286 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3287 dig_port->base.base.base.id, dig_port->base.base.name, 3288 onoff(state), onoff(cur_state)); 3289 } 3290 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3291 3292 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3293 { 3294 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3295 3296 I915_STATE_WARN(cur_state != state, 3297 "eDP PLL state assertion failure (expected %s, current %s)\n", 3298 onoff(state), onoff(cur_state)); 3299 } 3300 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3301 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3302 3303 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3304 const struct intel_crtc_state *pipe_config) 3305 { 3306 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3308 3309 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3310 assert_dp_port_disabled(intel_dp); 3311 assert_edp_pll_disabled(dev_priv); 3312 3313 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3314 pipe_config->port_clock); 3315 3316 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3317 3318 if (pipe_config->port_clock == 162000) 3319 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3320 else 3321 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3322 3323 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3324 intel_de_posting_read(dev_priv, DP_A); 3325 udelay(500); 3326 3327 /* 3328 * [DevILK] Work around required when enabling DP PLL 3329 * while a pipe is enabled going to FDI: 3330 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3331 * 2. 
Program DP PLL enable 3332 */ 3333 if (IS_GEN(dev_priv, 5)) 3334 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3335 3336 intel_dp->DP |= DP_PLL_ENABLE; 3337 3338 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3339 intel_de_posting_read(dev_priv, DP_A); 3340 udelay(200); 3341 } 3342 3343 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3344 const struct intel_crtc_state *old_crtc_state) 3345 { 3346 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3347 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3348 3349 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3350 assert_dp_port_disabled(intel_dp); 3351 assert_edp_pll_enabled(dev_priv); 3352 3353 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3354 3355 intel_dp->DP &= ~DP_PLL_ENABLE; 3356 3357 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3358 intel_de_posting_read(dev_priv, DP_A); 3359 udelay(200); 3360 } 3361 3362 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3363 { 3364 /* 3365 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3366 * be capable of signalling downstream hpd with a long pulse. 3367 * Whether or not that means D3 is safe to use is not clear, 3368 * but let's assume so until proven otherwise. 3369 * 3370 * FIXME should really check all downstream ports... 3371 */ 3372 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3373 drm_dp_is_branch(intel_dp->dpcd) && 3374 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3375 } 3376 3377 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3378 const struct intel_crtc_state *crtc_state, 3379 bool enable) 3380 { 3381 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3382 int ret; 3383 3384 if (!crtc_state->dsc.compression_enable) 3385 return; 3386 3387 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3388 enable ? DP_DECOMPRESSION_EN : 0); 3389 if (ret < 0) 3390 drm_dbg_kms(&i915->drm, 3391 "Failed to %s sink decompression state\n", 3392 enable ? "enable" : "disable"); 3393 } 3394 3395 /* If the sink supports it, try to set the power state appropriately */ 3396 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3397 { 3398 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3399 int ret, i; 3400 3401 /* Should have a valid DPCD by this point */ 3402 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3403 return; 3404 3405 if (mode != DRM_MODE_DPMS_ON) { 3406 if (downstream_hpd_needs_d0(intel_dp)) 3407 return; 3408 3409 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3410 DP_SET_POWER_D3); 3411 } else { 3412 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3413 3414 /* 3415 * When turning on, we need to retry a few times (sleeping 1 ms 3416 * between attempts) to give the sink time to wake up. 3417 */ 3418 for (i = 0; i < 3; i++) { 3419 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3420 DP_SET_POWER_D0); 3421 if (ret == 1) 3422 break; 3423 msleep(1); 3424 } 3425 3426 if (ret == 1 && lspcon->active) 3427 lspcon_wait_pcon_mode(lspcon); 3428 } 3429 3430 if (ret != 1) 3431 drm_dbg_kms(&i915->drm, "failed to %s sink power state\n", 3432 mode == DRM_MODE_DPMS_ON ?
"enable" : "disable"); 3433 } 3434 3435 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3436 enum port port, enum pipe *pipe) 3437 { 3438 enum pipe p; 3439 3440 for_each_pipe(dev_priv, p) { 3441 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3442 3443 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3444 *pipe = p; 3445 return true; 3446 } 3447 } 3448 3449 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3450 port_name(port)); 3451 3452 /* must initialize pipe to something for the asserts */ 3453 *pipe = PIPE_A; 3454 3455 return false; 3456 } 3457 3458 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3459 i915_reg_t dp_reg, enum port port, 3460 enum pipe *pipe) 3461 { 3462 bool ret; 3463 u32 val; 3464 3465 val = intel_de_read(dev_priv, dp_reg); 3466 3467 ret = val & DP_PORT_EN; 3468 3469 /* asserts want to know the pipe even if the port is disabled */ 3470 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3471 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3472 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3473 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3474 else if (IS_CHERRYVIEW(dev_priv)) 3475 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3476 else 3477 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3478 3479 return ret; 3480 } 3481 3482 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3483 enum pipe *pipe) 3484 { 3485 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3486 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3487 intel_wakeref_t wakeref; 3488 bool ret; 3489 3490 wakeref = intel_display_power_get_if_enabled(dev_priv, 3491 encoder->power_domain); 3492 if (!wakeref) 3493 return false; 3494 3495 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3496 encoder->port, pipe); 3497 3498 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3499 3500 return ret; 3501 } 3502 3503 static void intel_dp_get_config(struct intel_encoder *encoder, 3504 struct intel_crtc_state *pipe_config) 3505 { 3506 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3507 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3508 u32 tmp, flags = 0; 3509 enum port port = encoder->port; 3510 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3511 3512 if (encoder->type == INTEL_OUTPUT_EDP) 3513 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3514 else 3515 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3516 3517 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3518 3519 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3520 3521 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3522 u32 trans_dp = intel_de_read(dev_priv, 3523 TRANS_DP_CTL(crtc->pipe)); 3524 3525 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3526 flags |= DRM_MODE_FLAG_PHSYNC; 3527 else 3528 flags |= DRM_MODE_FLAG_NHSYNC; 3529 3530 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3531 flags |= DRM_MODE_FLAG_PVSYNC; 3532 else 3533 flags |= DRM_MODE_FLAG_NVSYNC; 3534 } else { 3535 if (tmp & DP_SYNC_HS_HIGH) 3536 flags |= DRM_MODE_FLAG_PHSYNC; 3537 else 3538 flags |= DRM_MODE_FLAG_NHSYNC; 3539 3540 if (tmp & DP_SYNC_VS_HIGH) 3541 flags |= DRM_MODE_FLAG_PVSYNC; 3542 else 3543 flags |= DRM_MODE_FLAG_NVSYNC; 3544 } 3545 3546 pipe_config->hw.adjusted_mode.flags |= flags; 3547 3548 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3549 pipe_config->limited_color_range = true; 3550 3551 pipe_config->lane_count = 3552 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3553 3554 intel_dp_get_m_n(crtc, pipe_config); 3555 3556 if (port == PORT_A) { 3557 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3558 pipe_config->port_clock = 162000; 3559 else 3560 pipe_config->port_clock = 270000; 3561 } 3562 3563 pipe_config->hw.adjusted_mode.crtc_clock = 3564 intel_dotclock_calculate(pipe_config->port_clock, 3565 &pipe_config->dp_m_n); 3566 3567 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3568 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3569 /* 3570 * This is a big fat ugly hack. 3571 * 3572 * Some machines in UEFI boot mode provide us a VBT that has 18 3573 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3574 * unknown we fail to light up. Yet the same BIOS boots up with 3575 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3576 * max, not what it tells us to use. 3577 * 3578 * Note: This will still be broken if the eDP panel is not lit 3579 * up by the BIOS, and thus we can't get the mode at module 3580 * load. 3581 */ 3582 drm_dbg_kms(&dev_priv->drm, 3583 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3584 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3585 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3586 } 3587 } 3588 3589 static void intel_disable_dp(struct intel_atomic_state *state, 3590 struct intel_encoder *encoder, 3591 const struct intel_crtc_state *old_crtc_state, 3592 const struct drm_connector_state *old_conn_state) 3593 { 3594 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3595 3596 intel_dp->link_trained = false; 3597 3598 if (old_crtc_state->has_audio) 3599 intel_audio_codec_disable(encoder, 3600 old_crtc_state, old_conn_state); 3601 3602 /* Make sure the panel is off before trying to change the mode. But also 3603 * ensure that we have vdd while we switch off the panel. */ 3604 intel_edp_panel_vdd_on(intel_dp); 3605 intel_edp_backlight_off(old_conn_state); 3606 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3607 intel_edp_panel_off(intel_dp); 3608 } 3609 3610 static void g4x_disable_dp(struct intel_atomic_state *state, 3611 struct intel_encoder *encoder, 3612 const struct intel_crtc_state *old_crtc_state, 3613 const struct drm_connector_state *old_conn_state) 3614 { 3615 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3616 } 3617 3618 static void vlv_disable_dp(struct intel_atomic_state *state, 3619 struct intel_encoder *encoder, 3620 const struct intel_crtc_state *old_crtc_state, 3621 const struct drm_connector_state *old_conn_state) 3622 { 3623 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3624 } 3625 3626 static void g4x_post_disable_dp(struct intel_atomic_state *state, 3627 struct intel_encoder *encoder, 3628 const struct intel_crtc_state *old_crtc_state, 3629 const struct drm_connector_state *old_conn_state) 3630 { 3631 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3632 enum port port = encoder->port; 3633 3634 /* 3635 * Bspec does not list a specific disable sequence for g4x DP. 3636 * Follow the ilk+ sequence (disable pipe before the port) for 3637 * g4x DP as it does not suffer from underruns like the normal 3638 * g4x modeset sequence (disable pipe after the port). 
3639 */ 3640 intel_dp_link_down(encoder, old_crtc_state); 3641 3642 /* Only ilk+ has port A */ 3643 if (port == PORT_A) 3644 ilk_edp_pll_off(intel_dp, old_crtc_state); 3645 } 3646 3647 static void vlv_post_disable_dp(struct intel_atomic_state *state, 3648 struct intel_encoder *encoder, 3649 const struct intel_crtc_state *old_crtc_state, 3650 const struct drm_connector_state *old_conn_state) 3651 { 3652 intel_dp_link_down(encoder, old_crtc_state); 3653 } 3654 3655 static void chv_post_disable_dp(struct intel_atomic_state *state, 3656 struct intel_encoder *encoder, 3657 const struct intel_crtc_state *old_crtc_state, 3658 const struct drm_connector_state *old_conn_state) 3659 { 3660 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3661 3662 intel_dp_link_down(encoder, old_crtc_state); 3663 3664 vlv_dpio_get(dev_priv); 3665 3666 /* Assert data lane reset */ 3667 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3668 3669 vlv_dpio_put(dev_priv); 3670 } 3671 3672 static void 3673 cpt_set_link_train(struct intel_dp *intel_dp, 3674 u8 dp_train_pat) 3675 { 3676 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3677 u32 *DP = &intel_dp->DP; 3678 3679 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3680 3681 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3682 case DP_TRAINING_PATTERN_DISABLE: 3683 *DP |= DP_LINK_TRAIN_OFF_CPT; 3684 break; 3685 case DP_TRAINING_PATTERN_1: 3686 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3687 break; 3688 case DP_TRAINING_PATTERN_2: 3689 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3690 break; 3691 case DP_TRAINING_PATTERN_3: 3692 drm_dbg_kms(&dev_priv->drm, 3693 "TPS3 not supported, using TPS2 instead\n"); 3694 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3695 break; 3696 } 3697 3698 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3699 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3700 } 3701 3702 static void 3703 g4x_set_link_train(struct intel_dp *intel_dp, 3704 u8 dp_train_pat) 3705 { 3706 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3707 u32 *DP = &intel_dp->DP; 3708 3709 *DP &= ~DP_LINK_TRAIN_MASK; 3710 3711 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3712 case DP_TRAINING_PATTERN_DISABLE: 3713 *DP |= DP_LINK_TRAIN_OFF; 3714 break; 3715 case DP_TRAINING_PATTERN_1: 3716 *DP |= DP_LINK_TRAIN_PAT_1; 3717 break; 3718 case DP_TRAINING_PATTERN_2: 3719 *DP |= DP_LINK_TRAIN_PAT_2; 3720 break; 3721 case DP_TRAINING_PATTERN_3: 3722 drm_dbg_kms(&dev_priv->drm, 3723 "TPS3 not supported, using TPS2 instead\n"); 3724 *DP |= DP_LINK_TRAIN_PAT_2; 3725 break; 3726 } 3727 3728 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3729 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3730 } 3731 3732 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3733 const struct intel_crtc_state *old_crtc_state) 3734 { 3735 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3736 3737 /* enable with pattern 1 (as per spec) */ 3738 3739 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3740 3741 /* 3742 * Magic for VLV/CHV. We _must_ first set up the register 3743 * without actually enabling the port, and then do another 3744 * write to enable the port. Otherwise link training will 3745 * fail when the power sequencer is freshly used for this port. 
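	 *
	 * A sketch of the resulting sequence, assuming the register was
	 * staged earlier in intel_dp_prepare():
	 *
	 *   intel_de_write(output_reg, DP);               port still disabled
	 *   intel_de_write(output_reg, DP | DP_PORT_EN);  second write enables it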
3746 */ 3747 intel_dp->DP |= DP_PORT_EN; 3748 if (old_crtc_state->has_audio) 3749 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3750 3751 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3752 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3753 } 3754 3755 static void intel_enable_dp(struct intel_atomic_state *state, 3756 struct intel_encoder *encoder, 3757 const struct intel_crtc_state *pipe_config, 3758 const struct drm_connector_state *conn_state) 3759 { 3760 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3761 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3762 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3763 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3764 enum pipe pipe = crtc->pipe; 3765 intel_wakeref_t wakeref; 3766 3767 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3768 return; 3769 3770 with_pps_lock(intel_dp, wakeref) { 3771 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3772 vlv_init_panel_power_sequencer(encoder, pipe_config); 3773 3774 intel_dp_enable_port(intel_dp, pipe_config); 3775 3776 edp_panel_vdd_on(intel_dp); 3777 edp_panel_on(intel_dp); 3778 edp_panel_vdd_off(intel_dp, true); 3779 } 3780 3781 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3782 unsigned int lane_mask = 0x0; 3783 3784 if (IS_CHERRYVIEW(dev_priv)) 3785 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3786 3787 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3788 lane_mask); 3789 } 3790 3791 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3792 intel_dp_start_link_train(intel_dp); 3793 intel_dp_stop_link_train(intel_dp); 3794 3795 if (pipe_config->has_audio) { 3796 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3797 pipe_name(pipe)); 3798 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3799 } 3800 } 3801 3802 static void g4x_enable_dp(struct intel_atomic_state *state, 3803 struct intel_encoder *encoder, 3804 const struct intel_crtc_state *pipe_config, 3805 const struct drm_connector_state *conn_state) 3806 { 3807 intel_enable_dp(state, encoder, pipe_config, conn_state); 3808 intel_edp_backlight_on(pipe_config, conn_state); 3809 } 3810 3811 static void vlv_enable_dp(struct intel_atomic_state *state, 3812 struct intel_encoder *encoder, 3813 const struct intel_crtc_state *pipe_config, 3814 const struct drm_connector_state *conn_state) 3815 { 3816 intel_edp_backlight_on(pipe_config, conn_state); 3817 } 3818 3819 static void g4x_pre_enable_dp(struct intel_atomic_state *state, 3820 struct intel_encoder *encoder, 3821 const struct intel_crtc_state *pipe_config, 3822 const struct drm_connector_state *conn_state) 3823 { 3824 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3825 enum port port = encoder->port; 3826 3827 intel_dp_prepare(encoder, pipe_config); 3828 3829 /* Only ilk+ has port A */ 3830 if (port == PORT_A) 3831 ilk_edp_pll_on(intel_dp, pipe_config); 3832 } 3833 3834 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3835 { 3836 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3837 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3838 enum pipe pipe = intel_dp->pps_pipe; 3839 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3840 3841 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3842 3843 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 3844 return; 3845 3846 edp_panel_vdd_off_sync(intel_dp); 3847 3848 /* 3849 * VLV seems to get confused when multiple power sequencers 
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV, on the other hand, doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's always clear the
	 * port select when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously, make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power sequencer from another port.
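	 *
	 * (vlv_steal_power_sequencer() below drops the pps_pipe claim of
	 * any other encoder still pointing at this pipe's PPS, syncing
	 * vdd off first via vlv_detach_power_sequencer().)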
3919 */ 3920 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3921 3922 intel_dp->active_pipe = crtc->pipe; 3923 3924 if (!intel_dp_is_edp(intel_dp)) 3925 return; 3926 3927 /* now it's all ours */ 3928 intel_dp->pps_pipe = crtc->pipe; 3929 3930 drm_dbg_kms(&dev_priv->drm, 3931 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3932 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3933 encoder->base.name); 3934 3935 /* init power sequencer on this pipe and port */ 3936 intel_dp_init_panel_power_sequencer(intel_dp); 3937 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3938 } 3939 3940 static void vlv_pre_enable_dp(struct intel_atomic_state *state, 3941 struct intel_encoder *encoder, 3942 const struct intel_crtc_state *pipe_config, 3943 const struct drm_connector_state *conn_state) 3944 { 3945 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3946 3947 intel_enable_dp(state, encoder, pipe_config, conn_state); 3948 } 3949 3950 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, 3951 struct intel_encoder *encoder, 3952 const struct intel_crtc_state *pipe_config, 3953 const struct drm_connector_state *conn_state) 3954 { 3955 intel_dp_prepare(encoder, pipe_config); 3956 3957 vlv_phy_pre_pll_enable(encoder, pipe_config); 3958 } 3959 3960 static void chv_pre_enable_dp(struct intel_atomic_state *state, 3961 struct intel_encoder *encoder, 3962 const struct intel_crtc_state *pipe_config, 3963 const struct drm_connector_state *conn_state) 3964 { 3965 chv_phy_pre_encoder_enable(encoder, pipe_config); 3966 3967 intel_enable_dp(state, encoder, pipe_config, conn_state); 3968 3969 /* Second common lane will stay alive on its own now */ 3970 chv_phy_release_cl2_override(encoder); 3971 } 3972 3973 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, 3974 struct intel_encoder *encoder, 3975 const struct intel_crtc_state *pipe_config, 3976 const struct drm_connector_state *conn_state) 3977 { 3978 intel_dp_prepare(encoder, pipe_config); 3979 3980 chv_phy_pre_pll_enable(encoder, pipe_config); 3981 } 3982 3983 static void chv_dp_post_pll_disable(struct intel_atomic_state *state, 3984 struct intel_encoder *encoder, 3985 const struct intel_crtc_state *old_crtc_state, 3986 const struct drm_connector_state *old_conn_state) 3987 { 3988 chv_phy_post_pll_disable(encoder, old_crtc_state); 3989 } 3990 3991 /* 3992 * Fetch AUX CH registers 0x202 - 0x207 which contain 3993 * link status information 3994 */ 3995 bool 3996 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3997 { 3998 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3999 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 4000 } 4001 4002 static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp) 4003 { 4004 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4005 } 4006 4007 static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp) 4008 { 4009 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 4010 } 4011 4012 static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp) 4013 { 4014 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4015 } 4016 4017 static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp) 4018 { 4019 return DP_TRAIN_PRE_EMPH_LEVEL_3; 4020 } 4021 4022 static void vlv_set_signal_levels(struct intel_dp *intel_dp) 4023 { 4024 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4025 unsigned long demph_reg_value, preemph_reg_value, 4026 uniqtranscale_reg_value; 4027 u8 train_set = intel_dp->train_set[0]; 4028 4029 switch (train_set & 
DP_TRAIN_PRE_EMPHASIS_MASK) { 4030 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4031 preemph_reg_value = 0x0004000; 4032 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4034 demph_reg_value = 0x2B405555; 4035 uniqtranscale_reg_value = 0x552AB83A; 4036 break; 4037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4038 demph_reg_value = 0x2B404040; 4039 uniqtranscale_reg_value = 0x5548B83A; 4040 break; 4041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4042 demph_reg_value = 0x2B245555; 4043 uniqtranscale_reg_value = 0x5560B83A; 4044 break; 4045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4046 demph_reg_value = 0x2B405555; 4047 uniqtranscale_reg_value = 0x5598DA3A; 4048 break; 4049 default: 4050 return; 4051 } 4052 break; 4053 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4054 preemph_reg_value = 0x0002000; 4055 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4057 demph_reg_value = 0x2B404040; 4058 uniqtranscale_reg_value = 0x5552B83A; 4059 break; 4060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4061 demph_reg_value = 0x2B404848; 4062 uniqtranscale_reg_value = 0x5580B83A; 4063 break; 4064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4065 demph_reg_value = 0x2B404040; 4066 uniqtranscale_reg_value = 0x55ADDA3A; 4067 break; 4068 default: 4069 return; 4070 } 4071 break; 4072 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4073 preemph_reg_value = 0x0000000; 4074 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4075 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4076 demph_reg_value = 0x2B305555; 4077 uniqtranscale_reg_value = 0x5570B83A; 4078 break; 4079 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4080 demph_reg_value = 0x2B2B4040; 4081 uniqtranscale_reg_value = 0x55ADDA3A; 4082 break; 4083 default: 4084 return; 4085 } 4086 break; 4087 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4088 preemph_reg_value = 0x0006000; 4089 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4091 demph_reg_value = 0x1B405555; 4092 uniqtranscale_reg_value = 0x55ADDA3A; 4093 break; 4094 default: 4095 return; 4096 } 4097 break; 4098 default: 4099 return; 4100 } 4101 4102 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 4103 uniqtranscale_reg_value, 0); 4104 } 4105 4106 static void chv_set_signal_levels(struct intel_dp *intel_dp) 4107 { 4108 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4109 u32 deemph_reg_value, margin_reg_value; 4110 bool uniq_trans_scale = false; 4111 u8 train_set = intel_dp->train_set[0]; 4112 4113 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4114 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4115 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4116 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4117 deemph_reg_value = 128; 4118 margin_reg_value = 52; 4119 break; 4120 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4121 deemph_reg_value = 128; 4122 margin_reg_value = 77; 4123 break; 4124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4125 deemph_reg_value = 128; 4126 margin_reg_value = 102; 4127 break; 4128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4129 deemph_reg_value = 128; 4130 margin_reg_value = 154; 4131 uniq_trans_scale = true; 4132 break; 4133 default: 4134 return; 4135 } 4136 break; 4137 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4138 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4140 deemph_reg_value = 85; 4141 margin_reg_value = 78; 4142 break; 4143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4144 deemph_reg_value = 85; 4145 margin_reg_value = 116; 4146 break; 4147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4148 deemph_reg_value = 85; 4149 
margin_reg_value = 154; 4150 break; 4151 default: 4152 return; 4153 } 4154 break; 4155 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4156 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4157 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4158 deemph_reg_value = 64; 4159 margin_reg_value = 104; 4160 break; 4161 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4162 deemph_reg_value = 64; 4163 margin_reg_value = 154; 4164 break; 4165 default: 4166 return; 4167 } 4168 break; 4169 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4170 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4171 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4172 deemph_reg_value = 43; 4173 margin_reg_value = 154; 4174 break; 4175 default: 4176 return; 4177 } 4178 break; 4179 default: 4180 return; 4181 } 4182 4183 chv_set_phy_signal_level(encoder, deemph_reg_value, 4184 margin_reg_value, uniq_trans_scale); 4185 } 4186 4187 static u32 g4x_signal_levels(u8 train_set) 4188 { 4189 u32 signal_levels = 0; 4190 4191 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4192 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4193 default: 4194 signal_levels |= DP_VOLTAGE_0_4; 4195 break; 4196 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4197 signal_levels |= DP_VOLTAGE_0_6; 4198 break; 4199 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4200 signal_levels |= DP_VOLTAGE_0_8; 4201 break; 4202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4203 signal_levels |= DP_VOLTAGE_1_2; 4204 break; 4205 } 4206 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4207 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4208 default: 4209 signal_levels |= DP_PRE_EMPHASIS_0; 4210 break; 4211 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4212 signal_levels |= DP_PRE_EMPHASIS_3_5; 4213 break; 4214 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4215 signal_levels |= DP_PRE_EMPHASIS_6; 4216 break; 4217 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4218 signal_levels |= DP_PRE_EMPHASIS_9_5; 4219 break; 4220 } 4221 return signal_levels; 4222 } 4223 4224 static void 4225 g4x_set_signal_levels(struct intel_dp *intel_dp) 4226 { 4227 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4228 u8 train_set = intel_dp->train_set[0]; 4229 u32 signal_levels; 4230 4231 signal_levels = g4x_signal_levels(train_set); 4232 4233 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4234 signal_levels); 4235 4236 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); 4237 intel_dp->DP |= signal_levels; 4238 4239 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4240 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4241 } 4242 4243 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4244 static u32 snb_cpu_edp_signal_levels(u8 train_set) 4245 { 4246 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4247 DP_TRAIN_PRE_EMPHASIS_MASK); 4248 4249 switch (signal_levels) { 4250 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4252 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4254 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4257 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4258 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4260 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4261 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4263 return 
EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4264 default: 4265 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4266 "0x%x\n", signal_levels); 4267 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4268 } 4269 } 4270 4271 static void 4272 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4273 { 4274 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4275 u8 train_set = intel_dp->train_set[0]; 4276 u32 signal_levels; 4277 4278 signal_levels = snb_cpu_edp_signal_levels(train_set); 4279 4280 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4281 signal_levels); 4282 4283 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4284 intel_dp->DP |= signal_levels; 4285 4286 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4287 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4288 } 4289 4290 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4291 static u32 ivb_cpu_edp_signal_levels(u8 train_set) 4292 { 4293 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4294 DP_TRAIN_PRE_EMPHASIS_MASK); 4295 4296 switch (signal_levels) { 4297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4298 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4300 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4303 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4304 4305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4306 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4307 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4308 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4309 4310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4311 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4313 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4314 4315 default: 4316 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 4317 "0x%x\n", signal_levels); 4318 return EDP_LINK_TRAIN_500MV_0DB_IVB; 4319 } 4320 } 4321 4322 static void 4323 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4324 { 4325 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4326 u8 train_set = intel_dp->train_set[0]; 4327 u32 signal_levels; 4328 4329 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4330 4331 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4332 signal_levels); 4333 4334 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4335 intel_dp->DP |= signal_levels; 4336 4337 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4338 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4339 } 4340 4341 void intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4342 { 4343 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4344 u8 train_set = intel_dp->train_set[0]; 4345 4346 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4347 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4348 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4349 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4350 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4351 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4352 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
4353 " (max)" : ""); 4354 4355 intel_dp->set_signal_levels(intel_dp); 4356 } 4357 4358 void 4359 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4360 u8 dp_train_pat) 4361 { 4362 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4363 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 4364 4365 if (dp_train_pat & train_pat_mask) 4366 drm_dbg_kms(&dev_priv->drm, 4367 "Using DP training pattern TPS%d\n", 4368 dp_train_pat & train_pat_mask); 4369 4370 intel_dp->set_link_train(intel_dp, dp_train_pat); 4371 } 4372 4373 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4374 { 4375 if (intel_dp->set_idle_link_train) 4376 intel_dp->set_idle_link_train(intel_dp); 4377 } 4378 4379 static void 4380 intel_dp_link_down(struct intel_encoder *encoder, 4381 const struct intel_crtc_state *old_crtc_state) 4382 { 4383 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4384 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4385 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4386 enum port port = encoder->port; 4387 u32 DP = intel_dp->DP; 4388 4389 if (drm_WARN_ON(&dev_priv->drm, 4390 (intel_de_read(dev_priv, intel_dp->output_reg) & 4391 DP_PORT_EN) == 0)) 4392 return; 4393 4394 drm_dbg_kms(&dev_priv->drm, "\n"); 4395 4396 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4397 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4398 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4399 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4400 } else { 4401 DP &= ~DP_LINK_TRAIN_MASK; 4402 DP |= DP_LINK_TRAIN_PAT_IDLE; 4403 } 4404 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4405 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4406 4407 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4408 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4409 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4410 4411 /* 4412 * HW workaround for IBX, we need to move the port 4413 * to transcoder A after disabling it to allow the 4414 * matching HDMI port to be enabled on transcoder A. 4415 */ 4416 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4417 /* 4418 * We get CPU/PCH FIFO underruns on the other pipe when 4419 * doing the workaround. Sweep them under the rug. 
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}

static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 dpcd_ext[6];

	/*
	 * Prior to DP 1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * If it is set, DP_DPCD_REV at 0000h could report a value lower
	 * than the true capability of the panel. The only way to check
	 * is to compare 0000h against 2200h.
	 */
	if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
			     &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
		drm_err(&i915->drm,
			"Failed to read DPCD extended capabilities\n");
		return;
	}

	if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		drm_dbg_kms(&i915->drm,
			    "DPCD extended DPCD rev less than base DPCD rev\n");
		return;
	}

	if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return;

	drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
		    (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);

	memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}

bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	intel_dp_extended_receiver_capabilities(intel_dp);

	drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
		    intel_dp->dpcd);

	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}

bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
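	 *
	 * (The cache is re-read below only for DPCD rev >= 1.4 or
	 * eDP 1.4+ sinks; anything older keeps the zeroed values, so
	 * callers can simply check dsc_dpcd / fec_capable for zero.)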
4525 */ 4526 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4527 4528 /* Clear fec_capable to avoid using stale values */ 4529 intel_dp->fec_capable = 0; 4530 4531 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4532 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4533 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4534 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4535 intel_dp->dsc_dpcd, 4536 sizeof(intel_dp->dsc_dpcd)) < 0) 4537 drm_err(&i915->drm, 4538 "Failed to read DPCD register 0x%x\n", 4539 DP_DSC_SUPPORT); 4540 4541 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 4542 (int)sizeof(intel_dp->dsc_dpcd), 4543 intel_dp->dsc_dpcd); 4544 4545 /* FEC is supported only on DP 1.4 */ 4546 if (!intel_dp_is_edp(intel_dp) && 4547 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4548 &intel_dp->fec_capable) < 0) 4549 drm_err(&i915->drm, 4550 "Failed to read FEC DPCD register\n"); 4551 4552 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 4553 intel_dp->fec_capable); 4554 } 4555 } 4556 4557 static bool 4558 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4559 { 4560 struct drm_i915_private *dev_priv = 4561 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4562 4563 /* this function is meant to be called only once */ 4564 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4565 4566 if (!intel_dp_read_dpcd(intel_dp)) 4567 return false; 4568 4569 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4570 drm_dp_is_branch(intel_dp->dpcd)); 4571 4572 /* 4573 * Read the eDP display control registers. 4574 * 4575 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4576 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4577 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4578 * method). The display control registers should read zero if they're 4579 * not supported anyway. 4580 */ 4581 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4582 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4583 sizeof(intel_dp->edp_dpcd)) 4584 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 4585 (int)sizeof(intel_dp->edp_dpcd), 4586 intel_dp->edp_dpcd); 4587 4588 /* 4589 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4590 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4591 */ 4592 intel_psr_init_dpcd(intel_dp); 4593 4594 /* Read the eDP 1.4+ supported link rates. */ 4595 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4596 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4597 int i; 4598 4599 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4600 sink_rates, sizeof(sink_rates)); 4601 4602 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4603 int val = le16_to_cpu(sink_rates[i]); 4604 4605 if (val == 0) 4606 break; 4607 4608 /* Value read multiplied by 200kHz gives the per-lane 4609 * link rate in kHz. The source rates are, however, 4610 * stored in terms of LS_Clk kHz. The full conversion 4611 * back to symbols is 4612 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4613 */ 4614 intel_dp->sink_rates[i] = (val * 200) / 10; 4615 } 4616 intel_dp->num_sink_rates = i; 4617 } 4618 4619 /* 4620 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4621 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 
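	 *
	 * Worked example: an eDP 1.4 panel advertising 0x1FA4 (8100) in
	 * DP_SUPPORTED_LINK_RATES means 8100 * 200 kHz = 1.62 Gbps per
	 * lane; the loop above stores it as 162000 (LS_Clk kHz), and the
	 * rate is then selected by its index via DP_LINK_RATE_SET.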
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for the sink count,
	 * which is why we don't bother reading it here or in
	 * intel_edp_init_dpcd().
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !drm_dp_has_quirk(&intel_dp->desc, 0,
			      DP_DPCD_QUIRK_NO_SINK_COUNT)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * The sink count can change between short pulse hpd
		 * interrupts, hence a member variable in intel_dp tracks
		 * any changes between them.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies
		 * that a dongle is present but no display is attached.
		 * Unless we need to know whether a dongle is present, we
		 * don't have to update the downstream port information, so
		 * an early return here saves time by skipping operations
		 * that are not required.
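		 *
		 * For example, a DP-to-VGA dongle with no monitor attached
		 * typically reports DOWNSTREAM_PORT_PRESENT = 1 with
		 * SINK_COUNT = 0; bailing out here makes the connector
		 * report disconnected without any further probing.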
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}

static bool
intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
	u8 mstm_cap;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return false;

	return mstm_cap & DP_MST_CAP;
}

static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		intel_dp_sink_can_mst(intel_dp);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], when sending
	 * YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
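	 *
	 * (Plain RGB with default colorimetry keeps using the MSA fields,
	 * so no VSC SDP is needed and we return false below.)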
4766 */ 4767 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4768 return true; 4769 4770 switch (conn_state->colorspace) { 4771 case DRM_MODE_COLORIMETRY_SYCC_601: 4772 case DRM_MODE_COLORIMETRY_OPYCC_601: 4773 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4774 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4775 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4776 return true; 4777 default: 4778 break; 4779 } 4780 4781 return false; 4782 } 4783 4784 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 4785 struct dp_sdp *sdp, size_t size) 4786 { 4787 size_t length = sizeof(struct dp_sdp); 4788 4789 if (size < length) 4790 return -ENOSPC; 4791 4792 memset(sdp, 0, size); 4793 4794 /* 4795 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 4796 * VSC SDP Header Bytes 4797 */ 4798 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 4799 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 4800 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 4801 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 4802 4803 /* 4804 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 4805 * per DP 1.4a spec. 4806 */ 4807 if (vsc->revision != 0x5) 4808 goto out; 4809 4810 /* VSC SDP Payload for DB16 through DB18 */ 4811 /* Pixel Encoding and Colorimetry Formats */ 4812 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 4813 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 4814 4815 switch (vsc->bpc) { 4816 case 6: 4817 /* 6bpc: 0x0 */ 4818 break; 4819 case 8: 4820 sdp->db[17] = 0x1; /* DB17[3:0] */ 4821 break; 4822 case 10: 4823 sdp->db[17] = 0x2; 4824 break; 4825 case 12: 4826 sdp->db[17] = 0x3; 4827 break; 4828 case 16: 4829 sdp->db[17] = 0x4; 4830 break; 4831 default: 4832 MISSING_CASE(vsc->bpc); 4833 break; 4834 } 4835 /* Dynamic Range and Component Bit Depth */ 4836 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 4837 sdp->db[17] |= 0x80; /* DB17[7] */ 4838 4839 /* Content Type */ 4840 sdp->db[18] = vsc->content_type & 0x7; 4841 4842 out: 4843 return length; 4844 } 4845 4846 static ssize_t 4847 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 4848 struct dp_sdp *sdp, 4849 size_t size) 4850 { 4851 size_t length = sizeof(struct dp_sdp); 4852 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4853 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4854 ssize_t len; 4855 4856 if (size < length) 4857 return -ENOSPC; 4858 4859 memset(sdp, 0, size); 4860 4861 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 4862 if (len < 0) { 4863 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 4864 return -ENOSPC; 4865 } 4866 4867 if (len != infoframe_size) { 4868 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 4869 return -ENOSPC; 4870 } 4871 4872 /* 4873 * Set up the infoframe sdp packet for HDR static metadata. 
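	 * (For the CTA-861-G DRM infoframe this works out to HB0 = 0x00,
	 * HB1 = 0x87, HB2 = 0x1D and HB3 = 0x4C (0x13 << 2), as set below.)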
	 * Prepare the infoframe SDP header as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101.
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87 (80h + InfoFrame Type 0x07)
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1),
	 * i.e. infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE bytes from the buffer, starting
	 * after HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * The size of the DP infoframe sdp packet for HDR static metadata:
	 * - DP SDP Header (struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11 the GMP register size is identical to the DP HDR
	 * static metadata infoframe size. On GEN11+ the register is
	 * larger; write_infoframe() will pad the rest.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg);

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable the VSC DIP */
	if (intel_psr_enabled(intel_dp))
		val &= ~dip_enable;
	else
		val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);

	if (!enable) {
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		return;
	}

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	/* When PSR is enabled, the VSC SDP is handled by the PSR routine */
	if (!intel_psr_enabled(intel_dp))
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	/*
	 * Clear only the output struct: "size" is the length of the
	 * source buffer and may exceed sizeof(*vsc).
	 */
	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel
		 *   Encoding/Colorimetry Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1),
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, the VSC SDP is handled by the PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));
	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;

	/* DP CTS 1.2, section 4.3.1.11 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read
failed\n"); 5214 return DP_TEST_NAK; 5215 } 5216 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 5217 5218 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 5219 &test_link_bw); 5220 if (status <= 0) { 5221 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5222 return DP_TEST_NAK; 5223 } 5224 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5225 5226 /* Validate the requested link rate and lane count */ 5227 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5228 test_lane_count)) 5229 return DP_TEST_NAK; 5230 5231 intel_dp->compliance.test_lane_count = test_lane_count; 5232 intel_dp->compliance.test_link_rate = test_link_rate; 5233 5234 return DP_TEST_ACK; 5235 } 5236 5237 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5238 { 5239 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5240 u8 test_pattern; 5241 u8 test_misc; 5242 __be16 h_width, v_height; 5243 int status = 0; 5244 5245 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5246 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5247 &test_pattern); 5248 if (status <= 0) { 5249 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5250 return DP_TEST_NAK; 5251 } 5252 if (test_pattern != DP_COLOR_RAMP) 5253 return DP_TEST_NAK; 5254 5255 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5256 &h_width, 2); 5257 if (status <= 0) { 5258 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5259 return DP_TEST_NAK; 5260 } 5261 5262 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5263 &v_height, 2); 5264 if (status <= 0) { 5265 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5266 return DP_TEST_NAK; 5267 } 5268 5269 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5270 &test_misc); 5271 if (status <= 0) { 5272 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5273 return DP_TEST_NAK; 5274 } 5275 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5276 return DP_TEST_NAK; 5277 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5278 return DP_TEST_NAK; 5279 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5280 case DP_TEST_BIT_DEPTH_6: 5281 intel_dp->compliance.test_data.bpc = 6; 5282 break; 5283 case DP_TEST_BIT_DEPTH_8: 5284 intel_dp->compliance.test_data.bpc = 8; 5285 break; 5286 default: 5287 return DP_TEST_NAK; 5288 } 5289 5290 intel_dp->compliance.test_data.video_pattern = test_pattern; 5291 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5292 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5293 /* Set test active flag here so userspace doesn't interrupt things */ 5294 intel_dp->compliance.test_active = true; 5295 5296 return DP_TEST_ACK; 5297 } 5298 5299 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5300 { 5301 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5302 u8 test_result = DP_TEST_ACK; 5303 struct intel_connector *intel_connector = intel_dp->attached_connector; 5304 struct drm_connector *connector = &intel_connector->base; 5305 5306 if (intel_connector->detect_edid == NULL || 5307 connector->edid_corrupt || 5308 intel_dp->aux.i2c_defer_count > 6) { 5309 /* Check EDID read for NACKs, DEFERs and corruption 5310 * (DP CTS 1.2 Core r1.1) 5311 * 4.2.2.4 : Failed EDID read, I2C_NAK 5312 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5313 * 4.2.2.6 : EDID corruption detected 5314 * Use failsafe mode for all cases 5315 */ 5316 if (intel_dp->aux.i2c_nack_count > 0 || 5317 intel_dp->aux.i2c_defer_count > 0) 5318 drm_dbg_kms(&i915->drm, 5319 "EDID read had %d NACKs, %d DEFERs\n", 5320 
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/*
		 * We have to write the checksum of the last block read.
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}

static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/*
	 * link_mst is set to false to avoid executing MST-related code
	 * during compliance testing.
	 */
	intel_dp->link_mst = false;

	return DP_TEST_ACK;
}

static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x250.
		 * The current DPR-100 firmware cannot set it, so hardcode
		 * it for the compliance test for now.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x24A.
As 5417 * the current DPR-100 firmware cannot set it, the pattern is 5418 * hardcoded for now for the compliance test. 5419 */ 5420 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5421 pattern_val = 0xFB; 5422 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5423 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5424 pattern_val); 5425 break; 5426 default: 5427 WARN(1, "Invalid Phy Test Pattern\n"); 5428 } 5429 } 5430 5431 static void 5432 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) 5433 { 5434 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5435 struct drm_device *dev = dig_port->base.base.dev; 5436 struct drm_i915_private *dev_priv = to_i915(dev); 5437 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5438 enum pipe pipe = crtc->pipe; 5439 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5440 5441 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5442 TRANS_DDI_FUNC_CTL(pipe)); 5443 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5444 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5445 5446 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5447 TGL_TRANS_DDI_PORT_MASK); 5448 trans_conf_value &= ~PIPECONF_ENABLE; 5449 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5450 5451 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5452 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5453 trans_ddi_func_ctl_value); 5454 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5455 } 5456 5457 static void 5458 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) 5459 { 5460 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5461 struct drm_device *dev = dig_port->base.base.dev; 5462 struct drm_i915_private *dev_priv = to_i915(dev); 5463 enum port port = dig_port->base.port; 5464 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5465 enum pipe pipe = crtc->pipe; 5466 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5467 5468 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5469 TRANS_DDI_FUNC_CTL(pipe)); 5470 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5471 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5472 5473 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5474 TGL_TRANS_DDI_SELECT_PORT(port); 5475 trans_conf_value |= PIPECONF_ENABLE; 5476 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5477 5478 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5479 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5480 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5481 trans_ddi_func_ctl_value); 5482 } 5483 5484 void intel_dp_process_phy_request(struct intel_dp *intel_dp) 5485 { 5486 struct drm_dp_phy_test_params *data = 5487 &intel_dp->compliance.test_data.phytest; 5488 u8 link_status[DP_LINK_STATUS_SIZE]; 5489 5490 if (!intel_dp_get_link_status(intel_dp, link_status)) { 5491 DRM_DEBUG_KMS("failed to get link status\n"); 5492 return; 5493 } 5494 5495 /* retrieve vswing & pre-emphasis setting */ 5496 intel_dp_get_adjust_train(intel_dp, link_status); 5497 5498 intel_dp_autotest_phy_ddi_disable(intel_dp); 5499 5500 intel_dp_set_signal_levels(intel_dp); 5501 5502 intel_dp_phy_pattern_update(intel_dp); 5503 5504 intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); 5505 5506 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5507 link_status[DP_DPCD_REV]); 5508 } 5509 5510 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5511 { 5512 u8 test_result; 5513 5514
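	/*
	 * Editor's note (an inference from the register values above, not
	 * from the DPR-100 docs): the three DDI_DP_COMP_PAT dwords appear to
	 * hold pattern bits 31:0, 63:32 and 79:64 packed LSB-first, so
	 * 0x3e0f83e0/0x0f83e0f8/0x0000f83e decode to the repeating
	 * "0000011111" sequence, as this sketch would generate:
	 *
	 *	u32 dw[3] = {};
	 *	int i;
	 *
	 *	for (i = 0; i < 80; i++)	// bit i of the test pattern
	 *		dw[i / 32] |= (u32)((i % 10) >= 5) << (i % 32);
	 */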
test_result = intel_dp_prepare_phytest(intel_dp); 5515 if (test_result != DP_TEST_ACK) 5516 DRM_ERROR("Phy test preparation failed\n"); 5517 5518 intel_dp_process_phy_request(intel_dp); 5519 5520 return test_result; 5521 } 5522 5523 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5524 { 5525 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5526 u8 response = DP_TEST_NAK; 5527 u8 request = 0; 5528 int status; 5529 5530 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5531 if (status <= 0) { 5532 drm_dbg_kms(&i915->drm, 5533 "Could not read test request from sink\n"); 5534 goto update_status; 5535 } 5536 5537 switch (request) { 5538 case DP_TEST_LINK_TRAINING: 5539 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5540 response = intel_dp_autotest_link_training(intel_dp); 5541 break; 5542 case DP_TEST_LINK_VIDEO_PATTERN: 5543 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5544 response = intel_dp_autotest_video_pattern(intel_dp); 5545 break; 5546 case DP_TEST_LINK_EDID_READ: 5547 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5548 response = intel_dp_autotest_edid(intel_dp); 5549 break; 5550 case DP_TEST_LINK_PHY_TEST_PATTERN: 5551 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5552 response = intel_dp_autotest_phy_pattern(intel_dp); 5553 break; 5554 default: 5555 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5556 request); 5557 break; 5558 } 5559 5560 if (response & DP_TEST_ACK) 5561 intel_dp->compliance.test_type = request; 5562 5563 update_status: 5564 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5565 if (status <= 0) 5566 drm_dbg_kms(&i915->drm, 5567 "Could not write test response to sink\n"); 5568 } 5569 5570 /** 5571 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 5572 * @intel_dp: Intel DP struct 5573 * 5574 * Read any pending MST interrupts, call MST core to handle these and ack the 5575 * interrupts. Check if the main and AUX link state is ok. 5576 * 5577 * Returns: 5578 * - %true if pending interrupts were serviced (or no interrupts were 5579 * pending) w/o detecting an error condition. 5580 * - %false if an error condition - like AUX failure or a loss of link - is 5581 * detected, which needs servicing from the hotplug work. 
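 *
 * A caller on the short-pulse path is expected to treat %false as "defer
 * to the hotplug work", roughly (editor's sketch of the usage; see
 * intel_dp_hpd_pulse() for the real call site):
 *
 *	if (intel_dp->is_mst && !intel_dp_check_mst_status(intel_dp))
 *		return IRQ_NONE;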
5582 */ 5583 static bool 5584 intel_dp_check_mst_status(struct intel_dp *intel_dp) 5585 { 5586 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5587 bool link_ok = true; 5588 5589 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 5590 5591 for (;;) { 5592 u8 esi[DP_DPRX_ESI_LEN] = {}; 5593 bool handled; 5594 int retry; 5595 5596 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 5597 drm_dbg_kms(&i915->drm, 5598 "failed to get ESI - device may have failed\n"); 5599 link_ok = false; 5600 5601 break; 5602 } 5603 5604 /* check link status - esi[10] = 0x200c */ 5605 if (intel_dp->active_mst_links > 0 && link_ok && 5606 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 5607 drm_dbg_kms(&i915->drm, 5608 "channel EQ not ok, retraining\n"); 5609 link_ok = false; 5610 } 5611 5612 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 5613 5614 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); 5615 if (!handled) 5616 break; 5617 5618 for (retry = 0; retry < 3; retry++) { 5619 int wret; 5620 5621 wret = drm_dp_dpcd_write(&intel_dp->aux, 5622 DP_SINK_COUNT_ESI+1, 5623 &esi[1], 3); 5624 if (wret == 3) 5625 break; 5626 } 5627 } 5628 5629 return link_ok; 5630 } 5631 5632 static bool 5633 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 5634 { 5635 u8 link_status[DP_LINK_STATUS_SIZE]; 5636 5637 if (!intel_dp->link_trained) 5638 return false; 5639 5640 /* 5641 * While PSR source HW is enabled, it controls the main link on its 5642 * own, sending frames and enabling/disabling the link, so attempting 5643 * a retrain here could fail: the link may not be on, or training 5644 * patterns could get mixed with frame data, causing the retrain to fail. 5645 * Also, when exiting PSR the HW retrains the link anyway, fixing 5646 * any link status error. 5647 */ 5648 if (intel_psr_enabled(intel_dp)) 5649 return false; 5650 5651 if (!intel_dp_get_link_status(intel_dp, link_status)) 5652 return false; 5653 5654 /* 5655 * Validate the cached values of intel_dp->link_rate and 5656 * intel_dp->lane_count before attempting to retrain.
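 * (Editor's note: they may have been reduced or invalidated in the
 * meantime, e.g. by a link training fallback or by re-reading the sink's
 * capabilities, in which case retraining with them would be pointless.)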
5657 */ 5658 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5659 intel_dp->lane_count)) 5660 return false; 5661 5662 /* Retrain if Channel EQ or CR not ok */ 5663 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5664 } 5665 5666 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5667 const struct drm_connector_state *conn_state) 5668 { 5669 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5670 struct intel_encoder *encoder; 5671 enum pipe pipe; 5672 5673 if (!conn_state->best_encoder) 5674 return false; 5675 5676 /* SST */ 5677 encoder = &dp_to_dig_port(intel_dp)->base; 5678 if (conn_state->best_encoder == &encoder->base) 5679 return true; 5680 5681 /* MST */ 5682 for_each_pipe(i915, pipe) { 5683 encoder = &intel_dp->mst_encoders[pipe]->base; 5684 if (conn_state->best_encoder == &encoder->base) 5685 return true; 5686 } 5687 5688 return false; 5689 } 5690 5691 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5692 struct drm_modeset_acquire_ctx *ctx, 5693 u32 *crtc_mask) 5694 { 5695 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5696 struct drm_connector_list_iter conn_iter; 5697 struct intel_connector *connector; 5698 int ret = 0; 5699 5700 *crtc_mask = 0; 5701 5702 if (!intel_dp_needs_link_retrain(intel_dp)) 5703 return 0; 5704 5705 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5706 for_each_intel_connector_iter(connector, &conn_iter) { 5707 struct drm_connector_state *conn_state = 5708 connector->base.state; 5709 struct intel_crtc_state *crtc_state; 5710 struct intel_crtc *crtc; 5711 5712 if (!intel_dp_has_connector(intel_dp, conn_state)) 5713 continue; 5714 5715 crtc = to_intel_crtc(conn_state->crtc); 5716 if (!crtc) 5717 continue; 5718 5719 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5720 if (ret) 5721 break; 5722 5723 crtc_state = to_intel_crtc_state(crtc->base.state); 5724 5725 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5726 5727 if (!crtc_state->hw.active) 5728 continue; 5729 5730 if (conn_state->commit && 5731 !try_wait_for_completion(&conn_state->commit->hw_done)) 5732 continue; 5733 5734 *crtc_mask |= drm_crtc_mask(&crtc->base); 5735 } 5736 drm_connector_list_iter_end(&conn_iter); 5737 5738 if (!intel_dp_needs_link_retrain(intel_dp)) 5739 *crtc_mask = 0; 5740 5741 return ret; 5742 } 5743 5744 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5745 { 5746 struct intel_connector *connector = intel_dp->attached_connector; 5747 5748 return connector->base.status == connector_status_connected || 5749 intel_dp->is_mst; 5750 } 5751 5752 int intel_dp_retrain_link(struct intel_encoder *encoder, 5753 struct drm_modeset_acquire_ctx *ctx) 5754 { 5755 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5756 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5757 struct intel_crtc *crtc; 5758 u32 crtc_mask; 5759 int ret; 5760 5761 if (!intel_dp_is_connected(intel_dp)) 5762 return 0; 5763 5764 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5765 ctx); 5766 if (ret) 5767 return ret; 5768 5769 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5770 if (ret) 5771 return ret; 5772 5773 if (crtc_mask == 0) 5774 return 0; 5775 5776 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5777 encoder->base.base.id, encoder->base.name); 5778 5779 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5780 const struct intel_crtc_state *crtc_state = 5781 to_intel_crtc_state(crtc->base.state); 5782 5783 /* Suppress underruns caused 
by re-training */ 5784 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 5785 if (crtc_state->has_pch_encoder) 5786 intel_set_pch_fifo_underrun_reporting(dev_priv, 5787 intel_crtc_pch_transcoder(crtc), false); 5788 } 5789 5790 intel_dp_start_link_train(intel_dp); 5791 intel_dp_stop_link_train(intel_dp); 5792 5793 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5794 const struct intel_crtc_state *crtc_state = 5795 to_intel_crtc_state(crtc->base.state); 5796 5797 /* Keep underrun reporting disabled until things are stable */ 5798 intel_wait_for_vblank(dev_priv, crtc->pipe); 5799 5800 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 5801 if (crtc_state->has_pch_encoder) 5802 intel_set_pch_fifo_underrun_reporting(dev_priv, 5803 intel_crtc_pch_transcoder(crtc), true); 5804 } 5805 5806 return 0; 5807 } 5808 5809 /* 5810 * If the display is now connected, check the link status; 5811 * there have been known issues of link loss triggering a 5812 * long pulse. 5813 * 5814 * Some sinks (e.g. the ASUS PB287Q) seem to perform some 5815 * weird HPD ping-pong during modesets: HPD can apparently 5816 * go low during a modeset and then come back up soon after. 5817 * Once that happens we must retrain the link to get a 5818 * picture back, in case no userspace component reacted to 5819 * the intermittent HPD dip. 5820 */ 5821 static enum intel_hotplug_state 5822 intel_dp_hotplug(struct intel_encoder *encoder, 5823 struct intel_connector *connector) 5824 { 5825 struct drm_modeset_acquire_ctx ctx; 5826 enum intel_hotplug_state state; 5827 int ret; 5828 5829 state = intel_encoder_hotplug(encoder, connector); 5830 5831 drm_modeset_acquire_init(&ctx, 0); 5832 5833 for (;;) { 5834 ret = intel_dp_retrain_link(encoder, &ctx); 5835 5836 if (ret == -EDEADLK) { 5837 drm_modeset_backoff(&ctx); 5838 continue; 5839 } 5840 5841 break; 5842 } 5843 5844 drm_modeset_drop_locks(&ctx); 5845 drm_modeset_acquire_fini(&ctx); 5846 drm_WARN(encoder->base.dev, ret, 5847 "Acquiring modeset locks failed with %i\n", ret); 5848 5849 /* 5850 * Keeping it consistent with intel_ddi_hotplug() and 5851 * intel_hdmi_hotplug(). 5852 */ 5853 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries) 5854 state = INTEL_HOTPLUG_RETRY; 5855 5856 return state; 5857 } 5858 5859 static void intel_dp_check_service_irq(struct intel_dp *intel_dp) 5860 { 5861 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5862 u8 val; 5863 5864 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 5865 return; 5866 5867 if (drm_dp_dpcd_readb(&intel_dp->aux, 5868 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 5869 return; 5870 5871 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 5872 5873 if (val & DP_AUTOMATED_TEST_REQUEST) 5874 intel_dp_handle_test_request(intel_dp); 5875 5876 if (val & DP_CP_IRQ) 5877 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 5878 5879 if (val & DP_SINK_SPECIFIC_IRQ) 5880 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 5881 } 5882 5883 /* 5884 * According to DP spec 5885 * 5.1.2: 5886 * 1. Read DPCD 5887 * 2. Configure link according to Receiver Capabilities 5888 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 5889 * 4. Check link status on receipt of hot-plug interrupt 5890 * 5891 * intel_dp_short_pulse - handles short pulse interrupts 5892 * when full detection is not required. 5893 * Returns %true if the short pulse was handled and full detection 5894 * is NOT required, %false otherwise.
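 *
 * In short (editor's summary of the code below): %false is returned when
 * the sink count changed, the DPCD read failed, or the link needs
 * retraining, so that the caller falls back to full detection.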
5895 */ 5896 static bool 5897 intel_dp_short_pulse(struct intel_dp *intel_dp) 5898 { 5899 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 5900 u8 old_sink_count = intel_dp->sink_count; 5901 bool ret; 5902 5903 /* 5904 * Clearing compliance test variables to allow capturing 5905 * of values for the next automated test request. 5906 */ 5907 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5908 5909 /* 5910 * Now read the DPCD to see if it's actually running. 5911 * If the current value of the sink count doesn't match 5912 * the value stored earlier, or the DPCD read failed, 5913 * we need to do full detection. 5914 */ 5915 ret = intel_dp_get_dpcd(intel_dp); 5916 5917 if ((old_sink_count != intel_dp->sink_count) || !ret) { 5918 /* No need to proceed if we are going to do full detect */ 5919 return false; 5920 } 5921 5922 intel_dp_check_service_irq(intel_dp); 5923 5924 /* Handle CEC interrupts, if any */ 5925 drm_dp_cec_irq(&intel_dp->aux); 5926 5927 /* defer to the hotplug work for link retraining if needed */ 5928 if (intel_dp_needs_link_retrain(intel_dp)) 5929 return false; 5930 5931 intel_psr_short_pulse(intel_dp); 5932 5933 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 5934 drm_dbg_kms(&dev_priv->drm, 5935 "Link Training Compliance Test requested\n"); 5936 /* Send a Hotplug Uevent to userspace to start modeset */ 5937 drm_kms_helper_hotplug_event(&dev_priv->drm); 5938 } 5939 5940 return true; 5941 } 5942 5943 /* XXX this is probably wrong for multiple downstream ports */ 5944 static enum drm_connector_status 5945 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 5946 { 5947 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5948 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 5949 u8 *dpcd = intel_dp->dpcd; 5950 u8 type; 5951 5952 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) 5953 return connector_status_connected; 5954 5955 if (lspcon->active) 5956 lspcon_resume(lspcon); 5957 5958 if (!intel_dp_get_dpcd(intel_dp)) 5959 return connector_status_disconnected; 5960 5961 /* if there's no downstream port, we're done */ 5962 if (!drm_dp_is_branch(dpcd)) 5963 return connector_status_connected; 5964 5965 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 5966 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 5967 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 5968 5969 return intel_dp->sink_count ?
5970 connector_status_connected : connector_status_disconnected; 5971 } 5972 5973 if (intel_dp_can_mst(intel_dp)) 5974 return connector_status_connected; 5975 5976 /* If no HPD, poke DDC gently */ 5977 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5978 return connector_status_connected; 5979 5980 /* Well we tried, say unknown for unreliable port types */ 5981 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5982 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5983 if (type == DP_DS_PORT_TYPE_VGA || 5984 type == DP_DS_PORT_TYPE_NON_EDID) 5985 return connector_status_unknown; 5986 } else { 5987 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 5988 DP_DWN_STRM_PORT_TYPE_MASK; 5989 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 5990 type == DP_DWN_STRM_PORT_TYPE_OTHER) 5991 return connector_status_unknown; 5992 } 5993 5994 /* Anything else is out of spec, warn and ignore */ 5995 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 5996 return connector_status_disconnected; 5997 } 5998 5999 static enum drm_connector_status 6000 edp_detect(struct intel_dp *intel_dp) 6001 { 6002 return connector_status_connected; 6003 } 6004 6005 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 6006 { 6007 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6008 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; 6009 6010 return intel_de_read(dev_priv, SDEISR) & bit; 6011 } 6012 6013 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 6014 { 6015 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6016 u32 bit; 6017 6018 switch (encoder->hpd_pin) { 6019 case HPD_PORT_B: 6020 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 6021 break; 6022 case HPD_PORT_C: 6023 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 6024 break; 6025 case HPD_PORT_D: 6026 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 6027 break; 6028 default: 6029 MISSING_CASE(encoder->hpd_pin); 6030 return false; 6031 } 6032 6033 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6034 } 6035 6036 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 6037 { 6038 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6039 u32 bit; 6040 6041 switch (encoder->hpd_pin) { 6042 case HPD_PORT_B: 6043 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 6044 break; 6045 case HPD_PORT_C: 6046 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 6047 break; 6048 case HPD_PORT_D: 6049 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 6050 break; 6051 default: 6052 MISSING_CASE(encoder->hpd_pin); 6053 return false; 6054 } 6055 6056 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 6057 } 6058 6059 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 6060 { 6061 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6062 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; 6063 6064 return intel_de_read(dev_priv, DEISR) & bit; 6065 } 6066 6067 /* 6068 * intel_digital_port_connected - is the specified port connected? 6069 * @encoder: intel_encoder 6070 * 6071 * In cases where there's a connector physically connected but it can't be used 6072 * by our hardware we also return false, since the rest of the driver should 6073 * pretty much treat the port as disconnected. This is relevant for type-C 6074 * (starting on ICL) where there's ownership involved. 6075 * 6076 * Return %true if port is connected, %false otherwise. 
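 *
 * The live-status query runs under a POWER_DOMAIN_DISPLAY_CORE wakeref
 * since the per-platform hooks read ISR/hotplug-status registers (SDEISR,
 * DEISR, PORT_HOTPLUG_STAT) that need display power to be accessible.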
6077 */ 6078 bool intel_digital_port_connected(struct intel_encoder *encoder) 6079 { 6080 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6081 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6082 bool is_connected = false; 6083 intel_wakeref_t wakeref; 6084 6085 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6086 is_connected = dig_port->connected(encoder); 6087 6088 return is_connected; 6089 } 6090 6091 static struct edid * 6092 intel_dp_get_edid(struct intel_dp *intel_dp) 6093 { 6094 struct intel_connector *intel_connector = intel_dp->attached_connector; 6095 6096 /* use cached edid if we have one */ 6097 if (intel_connector->edid) { 6098 /* invalid edid */ 6099 if (IS_ERR(intel_connector->edid)) 6100 return NULL; 6101 6102 return drm_edid_duplicate(intel_connector->edid); 6103 } else 6104 return drm_get_edid(&intel_connector->base, 6105 &intel_dp->aux.ddc); 6106 } 6107 6108 static void 6109 intel_dp_set_edid(struct intel_dp *intel_dp) 6110 { 6111 struct intel_connector *intel_connector = intel_dp->attached_connector; 6112 struct edid *edid; 6113 6114 intel_dp_unset_edid(intel_dp); 6115 edid = intel_dp_get_edid(intel_dp); 6116 intel_connector->detect_edid = edid; 6117 6118 intel_dp->has_audio = drm_detect_monitor_audio(edid); 6119 drm_dp_cec_set_edid(&intel_dp->aux, edid); 6120 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 6121 } 6122 6123 static void 6124 intel_dp_unset_edid(struct intel_dp *intel_dp) 6125 { 6126 struct intel_connector *intel_connector = intel_dp->attached_connector; 6127 6128 drm_dp_cec_unset_edid(&intel_dp->aux); 6129 kfree(intel_connector->detect_edid); 6130 intel_connector->detect_edid = NULL; 6131 6132 intel_dp->has_audio = false; 6133 intel_dp->edid_quirks = 0; 6134 } 6135 6136 static int 6137 intel_dp_detect(struct drm_connector *connector, 6138 struct drm_modeset_acquire_ctx *ctx, 6139 bool force) 6140 { 6141 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6142 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6143 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6144 struct intel_encoder *encoder = &dig_port->base; 6145 enum drm_connector_status status; 6146 6147 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6148 connector->base.id, connector->name); 6149 drm_WARN_ON(&dev_priv->drm, 6150 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 6151 6152 /* Can't disconnect eDP */ 6153 if (intel_dp_is_edp(intel_dp)) 6154 status = edp_detect(intel_dp); 6155 else if (intel_digital_port_connected(encoder)) 6156 status = intel_dp_detect_dpcd(intel_dp); 6157 else 6158 status = connector_status_disconnected; 6159 6160 if (status == connector_status_disconnected) { 6161 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6162 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 6163 6164 if (intel_dp->is_mst) { 6165 drm_dbg_kms(&dev_priv->drm, 6166 "MST device may have disappeared %d vs %d\n", 6167 intel_dp->is_mst, 6168 intel_dp->mst_mgr.mst_state); 6169 intel_dp->is_mst = false; 6170 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6171 intel_dp->is_mst); 6172 } 6173 6174 goto out; 6175 } 6176 6177 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 6178 if (INTEL_GEN(dev_priv) >= 11) 6179 intel_dp_get_dsc_sink_cap(intel_dp); 6180 6181 intel_dp_configure_mst(intel_dp); 6182 6183 /* 6184 * TODO: Reset link params when switching to MST mode, until MST 6185 * supports link training fallback params. 
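 * (The fallback params are the reduced max link rate / lane count picked
 * after a failed link training; the reset below restores the full
 * source/sink common capabilities.)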
6186 */ 6187 if (intel_dp->reset_link_params || intel_dp->is_mst) { 6188 /* Initial max link lane count */ 6189 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 6190 6191 /* Initial max link rate */ 6192 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 6193 6194 intel_dp->reset_link_params = false; 6195 } 6196 6197 intel_dp_print_rates(intel_dp); 6198 6199 if (intel_dp->is_mst) { 6200 /* 6201 * If we are in MST mode then this connector 6202 * won't appear connected or have anything 6203 * with EDID on it 6204 */ 6205 status = connector_status_disconnected; 6206 goto out; 6207 } 6208 6209 /* 6210 * Some external monitors do not signal loss of link synchronization 6211 * with an IRQ_HPD, so force a link status check. 6212 */ 6213 if (!intel_dp_is_edp(intel_dp)) { 6214 int ret; 6215 6216 ret = intel_dp_retrain_link(encoder, ctx); 6217 if (ret) 6218 return ret; 6219 } 6220 6221 /* 6222 * Clearing NACK and defer counts to get their exact values 6223 * while reading EDID which are required by Compliance tests 6224 * 4.2.2.4 and 4.2.2.5 6225 */ 6226 intel_dp->aux.i2c_nack_count = 0; 6227 intel_dp->aux.i2c_defer_count = 0; 6228 6229 intel_dp_set_edid(intel_dp); 6230 if (intel_dp_is_edp(intel_dp) || 6231 to_intel_connector(connector)->detect_edid) 6232 status = connector_status_connected; 6233 6234 intel_dp_check_service_irq(intel_dp); 6235 6236 out: 6237 if (status != connector_status_connected && !intel_dp->is_mst) 6238 intel_dp_unset_edid(intel_dp); 6239 6240 /* 6241 * Make sure the refs for power wells enabled during detect are 6242 * dropped to avoid a new detect cycle triggered by HPD polling. 6243 */ 6244 intel_display_power_flush_work(dev_priv); 6245 6246 return status; 6247 } 6248 6249 static void 6250 intel_dp_force(struct drm_connector *connector) 6251 { 6252 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6253 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6254 struct intel_encoder *intel_encoder = &dig_port->base; 6255 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6256 enum intel_display_power_domain aux_domain = 6257 intel_aux_power_domain(dig_port); 6258 intel_wakeref_t wakeref; 6259 6260 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6261 connector->base.id, connector->name); 6262 intel_dp_unset_edid(intel_dp); 6263 6264 if (connector->status != connector_status_connected) 6265 return; 6266 6267 wakeref = intel_display_power_get(dev_priv, aux_domain); 6268 6269 intel_dp_set_edid(intel_dp); 6270 6271 intel_display_power_put(dev_priv, aux_domain, wakeref); 6272 } 6273 6274 static int intel_dp_get_modes(struct drm_connector *connector) 6275 { 6276 struct intel_connector *intel_connector = to_intel_connector(connector); 6277 struct edid *edid; 6278 6279 edid = intel_connector->detect_edid; 6280 if (edid) { 6281 int ret = intel_connector_update_modes(connector, edid); 6282 if (ret) 6283 return ret; 6284 } 6285 6286 /* if eDP has no EDID, fall back to fixed mode */ 6287 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 6288 intel_connector->panel.fixed_mode) { 6289 struct drm_display_mode *mode; 6290 6291 mode = drm_mode_duplicate(connector->dev, 6292 intel_connector->panel.fixed_mode); 6293 if (mode) { 6294 drm_mode_probed_add(connector, mode); 6295 return 1; 6296 } 6297 } 6298 6299 return 0; 6300 } 6301 6302 static int 6303 intel_dp_connector_register(struct drm_connector *connector) 6304 { 6305 struct drm_i915_private *i915 = to_i915(connector->dev); 
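	/*
	 * Editor's note: the ordering below matters - register the base
	 * connector first, then the AUX channel (whose device is parented to
	 * the connector's kdev), and register CEC only if AUX registration
	 * succeeded.
	 */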
6306 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6307 int ret; 6308 6309 ret = intel_connector_register(connector); 6310 if (ret) 6311 return ret; 6312 6313 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 6314 intel_dp->aux.name, connector->kdev->kobj.name); 6315 6316 intel_dp->aux.dev = connector->kdev; 6317 ret = drm_dp_aux_register(&intel_dp->aux); 6318 if (!ret) 6319 drm_dp_cec_register_connector(&intel_dp->aux, connector); 6320 return ret; 6321 } 6322 6323 static void 6324 intel_dp_connector_unregister(struct drm_connector *connector) 6325 { 6326 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6327 6328 drm_dp_cec_unregister_connector(&intel_dp->aux); 6329 drm_dp_aux_unregister(&intel_dp->aux); 6330 intel_connector_unregister(connector); 6331 } 6332 6333 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 6334 { 6335 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 6336 struct intel_dp *intel_dp = &dig_port->dp; 6337 6338 intel_dp_mst_encoder_cleanup(dig_port); 6339 if (intel_dp_is_edp(intel_dp)) { 6340 intel_wakeref_t wakeref; 6341 6342 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6343 /* 6344 * vdd might still be enabled due to the delayed vdd off. 6345 * Make sure vdd is actually turned off here. 6346 */ 6347 with_pps_lock(intel_dp, wakeref) 6348 edp_panel_vdd_off_sync(intel_dp); 6349 6350 if (intel_dp->edp_notifier.notifier_call) { 6351 unregister_reboot_notifier(&intel_dp->edp_notifier); 6352 intel_dp->edp_notifier.notifier_call = NULL; 6353 } 6354 } 6355 6356 intel_dp_aux_fini(intel_dp); 6357 } 6358 6359 static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 6360 { 6361 intel_dp_encoder_flush_work(encoder); 6362 6363 drm_encoder_cleanup(encoder); 6364 kfree(enc_to_dig_port(to_intel_encoder(encoder))); 6365 } 6366 6367 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 6368 { 6369 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 6370 intel_wakeref_t wakeref; 6371 6372 if (!intel_dp_is_edp(intel_dp)) 6373 return; 6374 6375 /* 6376 * vdd might still be enabled due to the delayed vdd off. 6377 * Make sure vdd is actually turned off here.
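 * (The delayed off is the panel_vdd_work canceled below;
 * cancel_delayed_work_sync() ensures it is not still running while we
 * sync the VDD state under the pps lock.)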
6378 */ 6379 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 6380 with_pps_lock(intel_dp, wakeref) 6381 edp_panel_vdd_off_sync(intel_dp); 6382 } 6383 6384 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) 6385 { 6386 long ret; 6387 6388 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) 6389 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, 6390 msecs_to_jiffies(timeout)); 6391 6392 if (!ret) 6393 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n"); 6394 } 6395 6396 static 6397 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, 6398 u8 *an) 6399 { 6400 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6401 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base)); 6402 static const struct drm_dp_aux_msg msg = { 6403 .request = DP_AUX_NATIVE_WRITE, 6404 .address = DP_AUX_HDCP_AKSV, 6405 .size = DRM_HDCP_KSV_LEN, 6406 }; 6407 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0; 6408 ssize_t dpcd_ret; 6409 int ret; 6410 6411 /* Output An first, that's easy */ 6412 dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, 6413 an, DRM_HDCP_AN_LEN); 6414 if (dpcd_ret != DRM_HDCP_AN_LEN) { 6415 drm_dbg_kms(&i915->drm, 6416 "Failed to write An over DP/AUX (%zd)\n", 6417 dpcd_ret); 6418 return dpcd_ret >= 0 ? -EIO : dpcd_ret; 6419 } 6420 6421 /* 6422 * Since Aksv is Oh-So-Secret, we can't access it in software. So in 6423 * order to get it on the wire, we need to create the AUX header as if 6424 * we were writing the data, and then tickle the hardware to output the 6425 * data once the header is sent out. 6426 */ 6427 intel_dp_aux_header(txbuf, &msg); 6428 6429 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, 6430 rxbuf, sizeof(rxbuf), 6431 DP_AUX_CH_CTL_AUX_AKSV_SELECT); 6432 if (ret < 0) { 6433 drm_dbg_kms(&i915->drm, 6434 "Write Aksv over DP/AUX failed (%d)\n", ret); 6435 return ret; 6436 } else if (ret == 0) { 6437 drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n"); 6438 return -EIO; 6439 } 6440 6441 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; 6442 if (reply != DP_AUX_NATIVE_REPLY_ACK) { 6443 drm_dbg_kms(&i915->drm, 6444 "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", 6445 reply); 6446 return -EIO; 6447 } 6448 return 0; 6449 } 6450 6451 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, 6452 u8 *bksv) 6453 { 6454 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6455 ssize_t ret; 6456 6457 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, 6458 DRM_HDCP_KSV_LEN); 6459 if (ret != DRM_HDCP_KSV_LEN) { 6460 drm_dbg_kms(&i915->drm, 6461 "Read Bksv from DP/AUX failed (%zd)\n", ret); 6462 return ret >= 0 ? -EIO : ret; 6463 } 6464 return 0; 6465 } 6466 6467 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, 6468 u8 *bstatus) 6469 { 6470 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6471 ssize_t ret; 6472 6473 /* 6474 * For some reason the HDMI and DP HDCP specs call this register 6475 * definition by different names. In the HDMI spec, it's called BSTATUS, 6476 * but in DP it's called BINFO. 6477 */ 6478 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, 6479 bstatus, DRM_HDCP_BSTATUS_LEN); 6480 if (ret != DRM_HDCP_BSTATUS_LEN) { 6481 drm_dbg_kms(&i915->drm, 6482 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6483 return ret >= 0 ?
-EIO : ret; 6484 } 6485 return 0; 6486 } 6487 6488 static 6489 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, 6490 u8 *bcaps) 6491 { 6492 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6493 ssize_t ret; 6494 6495 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6496 bcaps, 1); 6497 if (ret != 1) { 6498 drm_dbg_kms(&i915->drm, 6499 "Read bcaps from DP/AUX failed (%zd)\n", ret); 6500 return ret >= 0 ? -EIO : ret; 6501 } 6502 6503 return 0; 6504 } 6505 6506 static 6507 int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, 6508 bool *repeater_present) 6509 { 6510 ssize_t ret; 6511 u8 bcaps; 6512 6513 ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); 6514 if (ret) 6515 return ret; 6516 6517 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6518 return 0; 6519 } 6520 6521 static 6522 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, 6523 u8 *ri_prime) 6524 { 6525 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6526 ssize_t ret; 6527 6528 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6529 ri_prime, DRM_HDCP_RI_LEN); 6530 if (ret != DRM_HDCP_RI_LEN) { 6531 drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", 6532 ret); 6533 return ret >= 0 ? -EIO : ret; 6534 } 6535 return 0; 6536 } 6537 6538 static 6539 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, 6540 bool *ksv_ready) 6541 { 6542 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6543 ssize_t ret; 6544 u8 bstatus; 6545 6546 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6547 &bstatus, 1); 6548 if (ret != 1) { 6549 drm_dbg_kms(&i915->drm, 6550 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6551 return ret >= 0 ? -EIO : ret; 6552 } 6553 *ksv_ready = bstatus & DP_BSTATUS_READY; 6554 return 0; 6555 } 6556 6557 static 6558 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, 6559 int num_downstream, u8 *ksv_fifo) 6560 { 6561 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6562 ssize_t ret; 6563 int i; 6564 6565 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6566 for (i = 0; i < num_downstream; i += 3) { 6567 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6568 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6569 DP_AUX_HDCP_KSV_FIFO, 6570 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6571 len); 6572 if (ret != len) { 6573 drm_dbg_kms(&i915->drm, 6574 "Read ksv[%d] from DP/AUX failed (%zd)\n", 6575 i, ret); 6576 return ret >= 0 ? -EIO : ret; 6577 } 6578 } 6579 return 0; 6580 } 6581 6582 static 6583 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, 6584 int i, u32 *part) 6585 { 6586 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6587 ssize_t ret; 6588 6589 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6590 return -EINVAL; 6591 6592 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6593 DP_AUX_HDCP_V_PRIME(i), part, 6594 DRM_HDCP_V_PRIME_PART_LEN); 6595 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6596 drm_dbg_kms(&i915->drm, 6597 "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6598 return ret >= 0 ? 
-EIO : ret; 6599 } 6600 return 0; 6601 } 6602 6603 static 6604 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, 6605 bool enable) 6606 { 6607 /* Not used for single stream DisplayPort setups */ 6608 return 0; 6609 } 6610 6611 static 6612 bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port) 6613 { 6614 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6615 ssize_t ret; 6616 u8 bstatus; 6617 6618 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6619 &bstatus, 1); 6620 if (ret != 1) { 6621 drm_dbg_kms(&i915->drm, 6622 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6623 return false; 6624 } 6625 6626 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6627 } 6628 6629 static 6630 int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, 6631 bool *hdcp_capable) 6632 { 6633 ssize_t ret; 6634 u8 bcaps; 6635 6636 ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); 6637 if (ret) 6638 return ret; 6639 6640 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6641 return 0; 6642 } 6643 6644 struct hdcp2_dp_errata_stream_type { 6645 u8 msg_id; 6646 u8 stream_type; 6647 } __packed; 6648 6649 struct hdcp2_dp_msg_data { 6650 u8 msg_id; 6651 u32 offset; 6652 bool msg_detectable; 6653 u32 timeout; 6654 u32 timeout2; /* Added for non_paired situation */ 6655 }; 6656 6657 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6658 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6659 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6660 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6661 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6662 false, 0, 0 }, 6663 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6664 false, 0, 0 }, 6665 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6666 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6667 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6668 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6669 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6670 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6671 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6672 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6673 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6674 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6675 0, 0 }, 6676 { HDCP_2_2_REP_SEND_RECVID_LIST, 6677 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6678 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6679 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6680 0, 0 }, 6681 { HDCP_2_2_REP_STREAM_MANAGE, 6682 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6683 0, 0 }, 6684 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6685 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6686 /* local define to shovel this through the write_2_2 interface */ 6687 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6688 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6689 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6690 0, 0 }, 6691 }; 6692 6693 static int 6694 intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, 6695 u8 *rx_status) 6696 { 6697 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6698 ssize_t ret; 6699 6700 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6701 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6702 HDCP_2_2_DP_RXSTATUS_LEN); 6703 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6704 drm_dbg_kms(&i915->drm, 6705 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6706 return ret >= 0 ? 
-EIO : ret; 6707 } 6708 6709 return 0; 6710 } 6711 6712 static 6713 int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, 6714 u8 msg_id, bool *msg_ready) 6715 { 6716 u8 rx_status; 6717 int ret; 6718 6719 *msg_ready = false; 6720 ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); 6721 if (ret < 0) 6722 return ret; 6723 6724 switch (msg_id) { 6725 case HDCP_2_2_AKE_SEND_HPRIME: 6726 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) 6727 *msg_ready = true; 6728 break; 6729 case HDCP_2_2_AKE_SEND_PAIRING_INFO: 6730 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) 6731 *msg_ready = true; 6732 break; 6733 case HDCP_2_2_REP_SEND_RECVID_LIST: 6734 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6735 *msg_ready = true; 6736 break; 6737 default: 6738 DRM_ERROR("Unidentified msg_id: %d\n", msg_id); 6739 return -EINVAL; 6740 } 6741 6742 return 0; 6743 } 6744 6745 static ssize_t 6746 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, 6747 const struct hdcp2_dp_msg_data *hdcp2_msg_data) 6748 { 6749 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6750 struct intel_dp *dp = &dig_port->dp; 6751 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6752 u8 msg_id = hdcp2_msg_data->msg_id; 6753 int ret, timeout; 6754 bool msg_ready = false; 6755 6756 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) 6757 timeout = hdcp2_msg_data->timeout2; 6758 else 6759 timeout = hdcp2_msg_data->timeout; 6760 6761 /* 6762 * There is no way to detect the CERT, LPRIME and STREAM_READY 6763 * availability, so wait for the timeout and then read the msg. 6764 */ 6765 if (!hdcp2_msg_data->msg_detectable) { 6766 mdelay(timeout); 6767 ret = 0; 6768 } else { 6769 /* 6770 * Since we want to check msg availability at the timeout, 6771 * ignore the timeout of the CP_IRQ wait. 6772 */ 6773 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); 6774 ret = hdcp2_detect_msg_availability(dig_port, 6775 msg_id, &msg_ready); 6776 if (!msg_ready) 6777 ret = -ETIMEDOUT; 6778 } 6779 6780 if (ret) 6781 drm_dbg_kms(&i915->drm, 6782 "msg_id %d, ret %d, timeout(mSec): %d\n", 6783 hdcp2_msg_data->msg_id, ret, timeout); 6784 6785 return ret; 6786 } 6787 6788 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) 6789 { 6790 int i; 6791 6792 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++) 6793 if (hdcp2_dp_msg_data[i].msg_id == msg_id) 6794 return &hdcp2_dp_msg_data[i]; 6795 6796 return NULL; 6797 } 6798 6799 static 6800 int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, 6801 void *buf, size_t size) 6802 { 6803 struct intel_dp *dp = &dig_port->dp; 6804 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; 6805 unsigned int offset; 6806 u8 *byte = buf; 6807 ssize_t ret, bytes_to_write, len; 6808 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6809 6810 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); 6811 if (!hdcp2_msg_data) 6812 return -EINVAL; 6813 6814 offset = hdcp2_msg_data->offset; 6815 6816 /* No msg_id in DP HDCP2.2 msgs */ 6817 bytes_to_write = size - 1; 6818 byte++; 6819 6820 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); 6821 6822 while (bytes_to_write) { 6823 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6824 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; 6825 6826 ret = drm_dp_dpcd_write(&dig_port->dp.aux, 6827 offset, (void *)byte, len); 6828 if (ret < 0) 6829 return ret; 6830 6831 bytes_to_write -= ret; 6832 byte += ret; 6833 offset += ret; 6834 } 6835 6836 return size; 6837 } 6838 6839 static 6840 ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) 6841 { 6842 u8 rx_info[HDCP_2_2_RXINFO_LEN]; 6843 u32 dev_cnt; 6844 ssize_t ret; 6845 6846 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6847 DP_HDCP_2_2_REG_RXINFO_OFFSET, 6848 (void *)rx_info, HDCP_2_2_RXINFO_LEN); 6849 if (ret != HDCP_2_2_RXINFO_LEN) 6850 return ret >= 0 ? -EIO : ret; 6851 6852 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 6853 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 6854 6855 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) 6856 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; 6857 6858 ret = sizeof(struct hdcp2_rep_send_receiverid_list) - 6859 HDCP_2_2_RECEIVER_IDS_MAX_LEN + 6860 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); 6861 6862 return ret; 6863 } 6864 6865 static 6866 int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, 6867 u8 msg_id, void *buf, size_t size) 6868 { 6869 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6870 unsigned int offset; 6871 u8 *byte = buf; 6872 ssize_t ret, bytes_to_recv, len; 6873 const struct hdcp2_dp_msg_data *hdcp2_msg_data; 6874 6875 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); 6876 if (!hdcp2_msg_data) 6877 return -EINVAL; 6878 offset = hdcp2_msg_data->offset; 6879 6880 ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data); 6881 if (ret < 0) 6882 return ret; 6883 6884 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { 6885 ret = get_receiver_id_list_size(dig_port); 6886 if (ret < 0) 6887 return ret; 6888 6889 size = ret; 6890 } 6891 bytes_to_recv = size - 1; 6892 6893 /* DP adaptation msgs have no msg_id */ 6894 byte++; 6895 6896 while (bytes_to_recv) { 6897 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 6898 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; 6899 6900 ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset, 6901 (void *)byte, len); 6902 if (ret < 0) { 6903 drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", 6904 msg_id, ret); 6905 return ret; 6906 } 6907 6908 bytes_to_recv -= ret; 6909 byte += ret; 6910 offset += ret; 6911 } 6912 byte = buf; 6913 *byte = msg_id; 6914 6915 return size; 6916 } 6917 6918 static 6919 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port, 6920 bool is_repeater, u8 content_type) 6921 { 6922 int ret; 6923 struct hdcp2_dp_errata_stream_type stream_type_msg; 6924 6925 if (is_repeater) 6926 return 0; 6927 6928 /* 6929 * Errata for DP: as the stream type is used for encryption, the 6930 * receiver must be told the stream type so it can decrypt the 6931 * content. 6932 * A repeater is told the stream type as part of its 6933 * authentication later in time. 6934 */ 6935 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; 6936 stream_type_msg.stream_type = content_type; 6937 6938 ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg, 6939 sizeof(stream_type_msg)); 6940 6941 return ret < 0 ?
ret : 0; 6942 6943 } 6944 6945 static 6946 int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port) 6947 { 6948 u8 rx_status; 6949 int ret; 6950 6951 ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); 6952 if (ret) 6953 return ret; 6954 6955 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) 6956 ret = HDCP_REAUTH_REQUEST; 6957 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) 6958 ret = HDCP_LINK_INTEGRITY_FAILURE; 6959 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6960 ret = HDCP_TOPOLOGY_CHANGE; 6961 6962 return ret; 6963 } 6964 6965 static 6966 int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, 6967 bool *capable) 6968 { 6969 u8 rx_caps[3]; 6970 int ret; 6971 6972 *capable = false; 6973 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6974 DP_HDCP_2_2_REG_RX_CAPS_OFFSET, 6975 rx_caps, HDCP_2_2_RXCAPS_LEN); 6976 if (ret != HDCP_2_2_RXCAPS_LEN) 6977 return ret >= 0 ? -EIO : ret; 6978 6979 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6980 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6981 *capable = true; 6982 6983 return 0; 6984 } 6985 6986 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 6987 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 6988 .read_bksv = intel_dp_hdcp_read_bksv, 6989 .read_bstatus = intel_dp_hdcp_read_bstatus, 6990 .repeater_present = intel_dp_hdcp_repeater_present, 6991 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 6992 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 6993 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 6994 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 6995 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 6996 .check_link = intel_dp_hdcp_check_link, 6997 .hdcp_capable = intel_dp_hdcp_capable, 6998 .write_2_2_msg = intel_dp_hdcp2_write_msg, 6999 .read_2_2_msg = intel_dp_hdcp2_read_msg, 7000 .config_stream_type = intel_dp_hdcp2_config_stream_type, 7001 .check_2_2_link = intel_dp_hdcp2_check_link, 7002 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 7003 .protocol = HDCP_PROTOCOL_DP, 7004 }; 7005 7006 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 7007 { 7008 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7009 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7010 7011 lockdep_assert_held(&dev_priv->pps_mutex); 7012 7013 if (!edp_have_panel_vdd(intel_dp)) 7014 return; 7015 7016 /* 7017 * The VDD bit needs a power domain reference, so if the bit is 7018 * already enabled when we boot or resume, grab this reference and 7019 * schedule a vdd off, so we don't hold on to the reference 7020 * indefinitely. 
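 * (This mirrors what edp_panel_vdd_on() would have done had the driver
 * itself enabled VDD: the power domain reference taken below is the one
 * the scheduled vdd off will eventually release.)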
7021 */ 7022 drm_dbg_kms(&dev_priv->drm, 7023 "VDD left on by BIOS, adjusting state tracking\n"); 7024 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 7025 7026 edp_panel_vdd_schedule_off(intel_dp); 7027 } 7028 7029 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 7030 { 7031 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7032 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 7033 enum pipe pipe; 7034 7035 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 7036 encoder->port, &pipe)) 7037 return pipe; 7038 7039 return INVALID_PIPE; 7040 } 7041 7042 void intel_dp_encoder_reset(struct drm_encoder *encoder) 7043 { 7044 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 7045 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 7046 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 7047 intel_wakeref_t wakeref; 7048 7049 if (!HAS_DDI(dev_priv)) 7050 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 7051 7052 if (lspcon->active) 7053 lspcon_resume(lspcon); 7054 7055 intel_dp->reset_link_params = true; 7056 7057 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 7058 !intel_dp_is_edp(intel_dp)) 7059 return; 7060 7061 with_pps_lock(intel_dp, wakeref) { 7062 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7063 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 7064 7065 if (intel_dp_is_edp(intel_dp)) { 7066 /* 7067 * Reinit the power sequencer, in case BIOS did 7068 * something nasty with it. 7069 */ 7070 intel_dp_pps_init(intel_dp); 7071 intel_edp_panel_vdd_sanitize(intel_dp); 7072 } 7073 } 7074 } 7075 7076 static int intel_modeset_tile_group(struct intel_atomic_state *state, 7077 int tile_group_id) 7078 { 7079 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7080 struct drm_connector_list_iter conn_iter; 7081 struct drm_connector *connector; 7082 int ret = 0; 7083 7084 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 7085 drm_for_each_connector_iter(connector, &conn_iter) { 7086 struct drm_connector_state *conn_state; 7087 struct intel_crtc_state *crtc_state; 7088 struct intel_crtc *crtc; 7089 7090 if (!connector->has_tile || 7091 connector->tile_group->id != tile_group_id) 7092 continue; 7093 7094 conn_state = drm_atomic_get_connector_state(&state->base, 7095 connector); 7096 if (IS_ERR(conn_state)) { 7097 ret = PTR_ERR(conn_state); 7098 break; 7099 } 7100 7101 crtc = to_intel_crtc(conn_state->crtc); 7102 7103 if (!crtc) 7104 continue; 7105 7106 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7107 crtc_state->uapi.mode_changed = true; 7108 7109 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7110 if (ret) 7111 break; 7112 } 7113 drm_connector_list_iter_end(&conn_iter); 7114 7115 return ret; 7116 } 7117 7118 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 7119 { 7120 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7121 struct intel_crtc *crtc; 7122 7123 if (transcoders == 0) 7124 return 0; 7125 7126 for_each_intel_crtc(&dev_priv->drm, crtc) { 7127 struct intel_crtc_state *crtc_state; 7128 int ret; 7129 7130 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7131 if (IS_ERR(crtc_state)) 7132 return PTR_ERR(crtc_state); 7133 7134 if (!crtc_state->hw.enable) 7135 continue; 7136 7137 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 7138 continue; 7139 7140 crtc_state->uapi.mode_changed = true; 7141 7142 ret = 
drm_atomic_add_affected_connectors(&state->base, &crtc->base); 7143 if (ret) 7144 return ret; 7145 7146 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7147 if (ret) 7148 return ret; 7149 7150 transcoders &= ~BIT(crtc_state->cpu_transcoder); 7151 } 7152 7153 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 7154 7155 return 0; 7156 } 7157 7158 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 7159 struct drm_connector *connector) 7160 { 7161 const struct drm_connector_state *old_conn_state = 7162 drm_atomic_get_old_connector_state(&state->base, connector); 7163 const struct intel_crtc_state *old_crtc_state; 7164 struct intel_crtc *crtc; 7165 u8 transcoders; 7166 7167 crtc = to_intel_crtc(old_conn_state->crtc); 7168 if (!crtc) 7169 return 0; 7170 7171 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 7172 7173 if (!old_crtc_state->hw.active) 7174 return 0; 7175 7176 transcoders = old_crtc_state->sync_mode_slaves_mask; 7177 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 7178 transcoders |= BIT(old_crtc_state->master_transcoder); 7179 7180 return intel_modeset_affected_transcoders(state, 7181 transcoders); 7182 } 7183 7184 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 7185 struct drm_atomic_state *_state) 7186 { 7187 struct drm_i915_private *dev_priv = to_i915(conn->dev); 7188 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7189 int ret; 7190 7191 ret = intel_digital_connector_atomic_check(conn, &state->base); 7192 if (ret) 7193 return ret; 7194 7195 /* 7196 * We don't enable port sync on BDW due to missing w/as and 7197 * due to not having adjusted the modeset sequence appropriately. 7198 */ 7199 if (INTEL_GEN(dev_priv) < 9) 7200 return 0; 7201 7202 if (!intel_connector_needs_modeset(state, conn)) 7203 return 0; 7204 7205 if (conn->has_tile) { 7206 ret = intel_modeset_tile_group(state, conn->tile_group->id); 7207 if (ret) 7208 return ret; 7209 } 7210 7211 return intel_modeset_synced_crtcs(state, conn); 7212 } 7213 7214 static const struct drm_connector_funcs intel_dp_connector_funcs = { 7215 .force = intel_dp_force, 7216 .fill_modes = drm_helper_probe_single_connector_modes, 7217 .atomic_get_property = intel_digital_connector_atomic_get_property, 7218 .atomic_set_property = intel_digital_connector_atomic_set_property, 7219 .late_register = intel_dp_connector_register, 7220 .early_unregister = intel_dp_connector_unregister, 7221 .destroy = intel_connector_destroy, 7222 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7223 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 7224 }; 7225 7226 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 7227 .detect_ctx = intel_dp_detect, 7228 .get_modes = intel_dp_get_modes, 7229 .mode_valid = intel_dp_mode_valid, 7230 .atomic_check = intel_dp_connector_atomic_check, 7231 }; 7232 7233 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 7234 .reset = intel_dp_encoder_reset, 7235 .destroy = intel_dp_encoder_destroy, 7236 }; 7237 7238 static bool intel_edp_have_power(struct intel_dp *intel_dp) 7239 { 7240 intel_wakeref_t wakeref; 7241 bool have_power = false; 7242 7243 with_pps_lock(intel_dp, wakeref) { 7244 have_power = edp_have_panel_power(intel_dp) && 7245 edp_have_panel_vdd(intel_dp); 7246 } 7247 7248 return have_power; 7249 } 7250 7251 enum irqreturn 7252 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 7253 { 7254 struct drm_i915_private *i915 = 
to_i915(dig_port->base.base.dev); 7255 struct intel_dp *intel_dp = &dig_port->dp; 7256 7257 if (dig_port->base.type == INTEL_OUTPUT_EDP && 7258 (long_hpd || !intel_edp_have_power(intel_dp))) { 7259 /* 7260 * vdd off can generate a long/short pulse on eDP which 7261 * would require vdd on to handle it, and thus we 7262 * would end up in an endless cycle of 7263 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 7264 */ 7265 drm_dbg_kms(&i915->drm, 7266 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 7267 long_hpd ? "long" : "short", 7268 dig_port->base.base.base.id, 7269 dig_port->base.base.name); 7270 return IRQ_HANDLED; 7271 } 7272 7273 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 7274 dig_port->base.base.base.id, 7275 dig_port->base.base.name, 7276 long_hpd ? "long" : "short"); 7277 7278 if (long_hpd) { 7279 intel_dp->reset_link_params = true; 7280 return IRQ_NONE; 7281 } 7282 7283 if (intel_dp->is_mst) { 7284 if (!intel_dp_check_mst_status(intel_dp)) 7285 return IRQ_NONE; 7286 } else if (!intel_dp_short_pulse(intel_dp)) { 7287 return IRQ_NONE; 7288 } 7289 7290 return IRQ_HANDLED; 7291 } 7292 7293 /* check the VBT to see whether the eDP is on another port */ 7294 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 7295 { 7296 /* 7297 * eDP is not supported on g4x, so bail out early just 7298 * for a bit of extra safety in case the VBT is bonkers. 7299 */ 7300 if (INTEL_GEN(dev_priv) < 5) 7301 return false; 7302 7303 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) 7304 return true; 7305 7306 return intel_bios_is_port_edp(dev_priv, port); 7307 } 7308 7309 static void 7310 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 7311 { 7312 struct drm_i915_private *dev_priv = to_i915(connector->dev); 7313 enum port port = dp_to_dig_port(intel_dp)->base.port; 7314 7315 if (!IS_G4X(dev_priv) && port != PORT_A) 7316 intel_attach_force_audio_property(connector); 7317 7318 intel_attach_broadcast_rgb_property(connector); 7319 if (HAS_GMCH(dev_priv)) 7320 drm_connector_attach_max_bpc_property(connector, 6, 10); 7321 else if (INTEL_GEN(dev_priv) >= 5) 7322 drm_connector_attach_max_bpc_property(connector, 6, 12); 7323 7324 intel_attach_colorspace_property(connector); 7325 7326 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 7327 drm_object_attach_property(&connector->base, 7328 connector->dev->mode_config.hdr_output_metadata_property, 7329 0); 7330 7331 if (intel_dp_is_edp(intel_dp)) { 7332 u32 allowed_scalers; 7333 7334 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 7335 if (!HAS_GMCH(dev_priv)) 7336 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 7337 7338 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 7339 7340 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 7341 7342 } 7343 } 7344 7345 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 7346 { 7347 intel_dp->panel_power_off_time = ktime_get_boottime(); 7348 intel_dp->last_power_on = jiffies; 7349 intel_dp->last_backlight_off = jiffies; 7350 } 7351 7352 static void 7353 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) 7354 { 7355 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7356 u32 pp_on, pp_off, pp_ctl; 7357 struct pps_registers regs; 7358 7359 intel_pps_get_registers(intel_dp, &regs); 7360 7361 pp_ctl = ilk_get_pp_control(intel_dp); 7362 7363 /* Ensure PPS is unlocked */ 7364 if (!HAS_DDI(dev_priv)) 7365 intel_de_write(dev_priv,
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}

static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/*
	 * On the Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/*
	 * The T11_T12 delay is special: the hw stores it in units of 100ms,
	 * zero based (so we need to add 100 ms), while the sw vbt table
	 * multiplies it by 1000 so that it too is in units of 100usec.
	 */
	vbt.t11_t12 += 100 * 10;

	/*
	 * Upper limits from the eDP 1.3 spec. Note that we use the clunky
	 * units of our hw here, which are all in 100usec.
	 */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/*
	 * This one is special, as above: units of 100ms in the hw, zero
	 * based (add 100 ms), multiplied by 1000 in the sw vbt table to
	 * land in units of 100usec, too.
	 */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/*
	 * Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits.
	 */
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
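/*
 * A worked example of the unit handling above, with an assumed (not
 * measured) VBT T12 of 500 ms: the VBT table stores it as 5000 in 100 us
 * units, the implicit 100 ms zero-base adds another 1000, and get_delay()
 * turns the resulting 6000 into a 600 ms panel_power_cycle_delay.
 * roundup() then leaves 6000 unchanged, since it is already a multiple
 * of the 100 ms hardware granularity.
 */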
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/*
	 * Haswell doesn't have any port selection bits for the panel
	 * power sequencer anymore.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock; simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
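/*
 * A worked example for the PP_DIV programming above, assuming a 24 MHz
 * raw clock (rawclk_freq == 24000): div = 24000 / 1000 = 24, so the
 * reference divider field becomes (100 * 24) / 2 - 1 = 1199, and a
 * 600 ms t11_t12 (6000 in 100 us units) is written as
 * DIV_ROUND_UP(6000, 1000) = 6, i.e. six 100 ms steps.
 */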
static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, the RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.mutex.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
	    refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs) {
		drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
		return;
	}

	if (dev_priv->psr.enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR enabled. Not enabling DRRS.\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (dev_priv->drrs.dp) {
		drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
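/*
 * The actual RR switch mechanism in intel_dp_set_drrs_state() above is
 * generation dependent: gen8+ (other than CHV) reprograms the pipe
 * between the M1/N1 and M2/N2 link parameter sets computed at modeset
 * time, while gen7 and VLV/CHV toggle the EDP_RR_MODE_SWITCH bit in
 * PIPECONF instead.
 */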
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate, hence we need to
	 * recheck.
	 */
	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}
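/*
 * A minimal sketch of the expected call pattern from the frontbuffer
 * tracking code, with an illustrative (not real) bits value:
 *
 *	unsigned int bits = INTEL_FRONTBUFFER_ALL_MASK(PIPE_A);
 *
 *	intel_edp_drrs_invalidate(dev_priv, bits); // render starts: upclock
 *	...
 *	intel_edp_drrs_flush(dev_priv, bits);      // render done: rearm idle downclock
 */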
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc has completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted, if no
 * other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity, hence schedule downclock if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates dynamically,
 * based on the usage scenario. This feature is applicable for internal
 * panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to a low RR based on usage scenarios.
 *
 * The implementation is based on the frontbuffer tracking implementation.
 * When there is a disturbance on the screen triggered by user activity or
 * a periodic system activity, DRRS is disabled (RR is changed to high RR).
 * When there is no movement on screen, after a timeout of 1 second, a
 * switch to low RR is made.
 *
 * For integration with the frontbuffer tracking code,
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
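/*
 * Putting the pieces above together: the downclock is entirely timer
 * driven. intel_edp_drrs_flush() schedules drrs.work with a 1000 ms
 * delay once busy_frontbuffer_bits is empty, and
 * intel_edp_drrs_downclock_work() rechecks the busy bits under
 * drrs.mutex before switching to the panel's downclock mode, so a
 * racing invalidate simply cancels the transition.
 */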
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of the downclock mode (apart
 * from the VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for eDP. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fall back to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if
		 * that fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay,
							       fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}

static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/*
	 * Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}
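/*
 * The link-status dance above follows the standard DRM contract: the
 * property is flipped to DRM_MODE_LINK_STATUS_BAD and a hotplug uevent
 * is sent, after which userspace is expected to reprobe the connector
 * and commit a new mode, which resets the property to GOOD. How quickly
 * (or whether) that happens is up to the compositor.
 */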
"eDP" : "DP", 8202 intel_encoder->base.base.id, intel_encoder->base.name); 8203 8204 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 8205 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 8206 8207 if (!HAS_GMCH(dev_priv)) 8208 connector->interlace_allowed = true; 8209 connector->doublescan_allowed = 0; 8210 8211 if (INTEL_GEN(dev_priv) >= 11) 8212 connector->ycbcr_420_allowed = true; 8213 8214 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 8215 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 8216 8217 intel_dp_aux_init(intel_dp); 8218 8219 intel_connector_attach_encoder(intel_connector, intel_encoder); 8220 8221 if (HAS_DDI(dev_priv)) 8222 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 8223 else 8224 intel_connector->get_hw_state = intel_connector_get_hw_state; 8225 8226 /* init MST on ports that can support it */ 8227 intel_dp_mst_encoder_init(dig_port, 8228 intel_connector->base.base.id); 8229 8230 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 8231 intel_dp_aux_fini(intel_dp); 8232 intel_dp_mst_encoder_cleanup(dig_port); 8233 goto fail; 8234 } 8235 8236 intel_dp_add_properties(intel_dp, connector); 8237 8238 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 8239 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); 8240 if (ret) 8241 drm_dbg_kms(&dev_priv->drm, 8242 "HDCP init failed, skipping.\n"); 8243 } 8244 8245 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 8246 * 0xd. Failure to do so will result in spurious interrupts being 8247 * generated on the port when a cable is not attached. 8248 */ 8249 if (IS_G45(dev_priv)) { 8250 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 8251 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 8252 (temp & ~0xf) | 0xd); 8253 } 8254 8255 return true; 8256 8257 fail: 8258 drm_connector_cleanup(connector); 8259 8260 return false; 8261 } 8262 8263 bool intel_dp_init(struct drm_i915_private *dev_priv, 8264 i915_reg_t output_reg, 8265 enum port port) 8266 { 8267 struct intel_digital_port *dig_port; 8268 struct intel_encoder *intel_encoder; 8269 struct drm_encoder *encoder; 8270 struct intel_connector *intel_connector; 8271 8272 dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); 8273 if (!dig_port) 8274 return false; 8275 8276 intel_connector = intel_connector_alloc(); 8277 if (!intel_connector) 8278 goto err_connector_alloc; 8279 8280 intel_encoder = &dig_port->base; 8281 encoder = &intel_encoder->base; 8282 8283 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 8284 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 8285 "DP %c", port_name(port))) 8286 goto err_encoder_init; 8287 8288 intel_encoder->hotplug = intel_dp_hotplug; 8289 intel_encoder->compute_config = intel_dp_compute_config; 8290 intel_encoder->get_hw_state = intel_dp_get_hw_state; 8291 intel_encoder->get_config = intel_dp_get_config; 8292 intel_encoder->update_pipe = intel_panel_update_backlight; 8293 intel_encoder->suspend = intel_dp_encoder_suspend; 8294 if (IS_CHERRYVIEW(dev_priv)) { 8295 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 8296 intel_encoder->pre_enable = chv_pre_enable_dp; 8297 intel_encoder->enable = vlv_enable_dp; 8298 intel_encoder->disable = vlv_disable_dp; 8299 intel_encoder->post_disable = chv_post_disable_dp; 8300 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 8301 } else if (IS_VALLEYVIEW(dev_priv)) { 8302 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 8303 intel_encoder->pre_enable = 
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;
	dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
	dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}