/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations, in KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR 972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
	  { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
	  { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
	  { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
	  { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
	  { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
	  { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has additional link rates.
 * Below we only provide the fixed rates; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
	  { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
	  { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/*
 * With a single-pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}
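
/*
 * Example (illustrative): with rates = { 162000, 270000, 540000 } and
 * max_rate = 300000, the backwards scan above hits 270000 first and
 * returns a length of 2.
 */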

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
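
/*
 * Illustrative summary (driver convention, not bspec text): the
 * *_max_source_rate() helpers above return the highest usable link rate
 * in kHz, e.g. 540000 (HBR2) for an ICL combo PHY driving external DP
 * and 810000 (HBR3) otherwise; intel_dp_set_source_rates() below then
 * truncates the platform rate table at that value.
 */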

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);
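
	/*
	 * Example (illustrative): source rates { 162000, 270000, 540000 }
	 * and sink rates { 162000, 270000, 324000 } intersect to
	 * { 162000, 270000 } via the sorted two-pointer walk above.
	 */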

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
		intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
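
	/*
	 * Worked example (illustrative): HBR2 x4 (link_clock = 540000,
	 * lane_count = 4) with a 533250 kHz mode gives a FEC-adjusted
	 * clock of ~548464 kHz, so 540000 * 4 * 8 / 548464 ~= 31 bpp.
	 */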

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP is in U6.4 format, so multiply by 16; for Gen 11
	 * the fractional part is 0.
	 */
	return bits_per_pixel << 4;
}

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}
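
/*
 * Illustrative link-budget math used by intel_dp_mode_valid() below:
 * a 2560x1440@60 mode (~241500 kHz dotclock) at the 18 bpp lower bound
 * needs intel_dp_link_required(241500, 18) = 543375 kB/s, well within
 * the intel_dp_max_data_rate(540000, 4) = 2160000 kB/s of HBR2 x4.
 */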

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
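
/*
 * Usage sketch (illustrative) for the pps_lock()/pps_unlock() pair and
 * the with_pps_lock() helper defined below:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... touch PPS registers ...
 *	}
 *
 * This takes the AUX power domain reference before pps_mutex and drops
 * them in reverse order once the loop body finishes.
 */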

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
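
	/*
	 * Illustrative: pipes starts as the bitmask 0b11 (PIPE_A | PIPE_B);
	 * the loop below clears the bit of every pipe whose power sequencer
	 * is already claimed, and ffs() then picks the lowest free pipe.
	 */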

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power and guarantee T12 timing.
 * Only applicable when the panel power state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}
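
/*
 * Illustrative arithmetic: with a 24 MHz rawclk (rawclk_freq = 24000 kHz),
 * DIV_ROUND_CLOSEST(24000, 2000) = 12 is the divider that yields the
 * 2 MHz AUX clock the comments above refer to.
 */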

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so return -EBUSY so that the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
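
/*
 * Example (illustrative): snprintf_int_array() renders { 162000, 270000,
 * 540000 } as "162000, 270000, 540000", truncating safely when the
 * buffer runs out.
 */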
*/ 1867 1868 if (!drm_debug_enabled(DRM_UT_KMS)) 1869 return; 1870 1871 snprintf_int_array(str, sizeof(str), 1872 intel_dp->source_rates, intel_dp->num_source_rates); 1873 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 1874 1875 snprintf_int_array(str, sizeof(str), 1876 intel_dp->sink_rates, intel_dp->num_sink_rates); 1877 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 1878 1879 snprintf_int_array(str, sizeof(str), 1880 intel_dp->common_rates, intel_dp->num_common_rates); 1881 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 1882 } 1883 1884 int 1885 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1886 { 1887 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1888 int len; 1889 1890 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1891 if (drm_WARN_ON(&i915->drm, len <= 0)) 1892 return 162000; 1893 1894 return intel_dp->common_rates[len - 1]; 1895 } 1896 1897 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 1898 { 1899 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1900 int i = intel_dp_rate_index(intel_dp->sink_rates, 1901 intel_dp->num_sink_rates, rate); 1902 1903 if (drm_WARN_ON(&i915->drm, i < 0)) 1904 i = 0; 1905 1906 return i; 1907 } 1908 1909 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 1910 u8 *link_bw, u8 *rate_select) 1911 { 1912 /* eDP 1.4 rate select method. */ 1913 if (intel_dp->use_rate_select) { 1914 *link_bw = 0; 1915 *rate_select = 1916 intel_dp_rate_select(intel_dp, port_clock); 1917 } else { 1918 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 1919 *rate_select = 0; 1920 } 1921 } 1922 1923 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 1924 const struct intel_crtc_state *pipe_config) 1925 { 1926 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1927 1928 /* On TGL, FEC is supported on all Pipes */ 1929 if (INTEL_GEN(dev_priv) >= 12) 1930 return true; 1931 1932 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) 1933 return true; 1934 1935 return false; 1936 } 1937 1938 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1939 const struct intel_crtc_state *pipe_config) 1940 { 1941 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1942 drm_dp_sink_supports_fec(intel_dp->fec_capable); 1943 } 1944 1945 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 1946 const struct intel_crtc_state *crtc_state) 1947 { 1948 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1949 1950 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable) 1951 return false; 1952 1953 return intel_dsc_source_support(encoder, crtc_state) && 1954 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 1955 } 1956 1957 static int intel_dp_compute_bpp(struct intel_dp *intel_dp, 1958 struct intel_crtc_state *pipe_config) 1959 { 1960 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1961 struct intel_connector *intel_connector = intel_dp->attached_connector; 1962 int bpp, bpc; 1963 1964 bpp = pipe_config->pipe_bpp; 1965 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); 1966 1967 if (bpc > 0) 1968 bpp = min(bpp, 3 * bpc); 1969 1970 if (intel_dp_is_edp(intel_dp)) { 1971 /* Get bpp from VBT only for panels that don't have bpp in EDID */ 1972 if (intel_connector->base.display_info.bpc == 0 && 1973 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 1974 drm_dbg_kms(&dev_priv->drm, 1975 "clamping bpp for eDP panel to BIOS-provided %i\n", 1976 dev_priv->vbt.edp.bpp); 1977 bpp = dev_priv->vbt.edp.bpp; 1978 }
1979 } 1980 1981 return bpp; 1982 } 1983 1984 /* Adjust link config limits based on compliance test requests. */ 1985 void 1986 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1987 struct intel_crtc_state *pipe_config, 1988 struct link_config_limits *limits) 1989 { 1990 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1991 1992 /* For DP Compliance we override the computed bpp for the pipe */ 1993 if (intel_dp->compliance.test_data.bpc != 0) { 1994 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1995 1996 limits->min_bpp = limits->max_bpp = bpp; 1997 pipe_config->dither_force_disable = bpp == 6 * 3; 1998 1999 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 2000 } 2001 2002 /* Use values requested by Compliance Test Request */ 2003 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 2004 int index; 2005 2006 /* Validate the compliance test data since max values 2007 * might have changed due to link train fallback. 2008 */ 2009 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 2010 intel_dp->compliance.test_lane_count)) { 2011 index = intel_dp_rate_index(intel_dp->common_rates, 2012 intel_dp->num_common_rates, 2013 intel_dp->compliance.test_link_rate); 2014 if (index >= 0) 2015 limits->min_clock = limits->max_clock = index; 2016 limits->min_lane_count = limits->max_lane_count = 2017 intel_dp->compliance.test_lane_count; 2018 } 2019 } 2020 } 2021 2022 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) 2023 { 2024 /* 2025 * The bpp value was computed assuming RGB output. For YCbCr 4:2:0 2026 * output the effective number of bits per pixel is half that of 2027 * RGB, so halve bpp here. 2028 */ 2029 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2030 bpp /= 2; 2031 2032 return bpp; 2033 } 2034 2035 /* Optimize link config in order: max bpp, min clock, min lanes */ 2036 static int 2037 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 2038 struct intel_crtc_state *pipe_config, 2039 const struct link_config_limits *limits) 2040 { 2041 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2042 int bpp, clock, lane_count; 2043 int mode_rate, link_clock, link_avail; 2044 2045 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 2046 int output_bpp = intel_dp_output_bpp(pipe_config, bpp); 2047 2048 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 2049 output_bpp); 2050 2051 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 2052 for (lane_count = limits->min_lane_count; 2053 lane_count <= limits->max_lane_count; 2054 lane_count <<= 1) { 2055 link_clock = intel_dp->common_rates[clock]; 2056 link_avail = intel_dp_max_data_rate(link_clock, 2057 lane_count); 2058 2059 if (mode_rate <= link_avail) { 2060 pipe_config->lane_count = lane_count; 2061 pipe_config->pipe_bpp = bpp; 2062 pipe_config->port_clock = link_clock; 2063 2064 return 0; 2065 } 2066 } 2067 } 2068 } 2069 2070 return -EINVAL; 2071 } 2072 2073 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) 2074 { 2075 int i, num_bpc; 2076 u8 dsc_bpc[3] = {0}; 2077 2078 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 2079 dsc_bpc); 2080 for (i = 0; i < num_bpc; i++) { 2081 if (dsc_max_bpc >= dsc_bpc[i]) 2082 return dsc_bpc[i] * 3; 2083 } 2084 2085 return 0; 2086 } 2087 2088 #define DSC_SUPPORTED_VERSION_MIN 1 2089 2090 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 2091 struct intel_crtc_state
*crtc_state) 2092 { 2093 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2094 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2095 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2096 u8 line_buf_depth; 2097 int ret; 2098 2099 ret = intel_dsc_compute_params(encoder, crtc_state); 2100 if (ret) 2101 return ret; 2102 2103 /* 2104 * Slice Height of 8 works for all currently available panels. So start 2105 * with that if pic_height is an integral multiple of 8. Eventually add 2106 * logic to try multiple slice heights. 2107 */ 2108 if (vdsc_cfg->pic_height % 8 == 0) 2109 vdsc_cfg->slice_height = 8; 2110 else if (vdsc_cfg->pic_height % 4 == 0) 2111 vdsc_cfg->slice_height = 4; 2112 else 2113 vdsc_cfg->slice_height = 2; 2114 2115 vdsc_cfg->dsc_version_major = 2116 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2117 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 2118 vdsc_cfg->dsc_version_minor = 2119 min(DSC_SUPPORTED_VERSION_MIN, 2120 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 2121 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 2122 2123 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 2124 DP_DSC_RGB; 2125 2126 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 2127 if (!line_buf_depth) { 2128 drm_dbg_kms(&i915->drm, 2129 "DSC Sink Line Buffer Depth invalid\n"); 2130 return -EINVAL; 2131 } 2132 2133 if (vdsc_cfg->dsc_version_minor == 2) 2134 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 2135 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 2136 else 2137 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 2138 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 2139 2140 vdsc_cfg->block_pred_enable = 2141 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 2142 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 2143 2144 return drm_dsc_compute_rc_parameters(vdsc_cfg); 2145 } 2146 2147 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 2148 struct intel_crtc_state *pipe_config, 2149 struct drm_connector_state *conn_state, 2150 struct link_config_limits *limits) 2151 { 2152 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2153 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 2154 const struct drm_display_mode *adjusted_mode = 2155 &pipe_config->hw.adjusted_mode; 2156 u8 dsc_max_bpc; 2157 int pipe_bpp; 2158 int ret; 2159 2160 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2161 intel_dp_supports_fec(intel_dp, pipe_config); 2162 2163 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 2164 return -EINVAL; 2165 2166 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 2167 if (INTEL_GEN(dev_priv) >= 12) 2168 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); 2169 else 2170 dsc_max_bpc = min_t(u8, 10, 2171 conn_state->max_requested_bpc); 2172 2173 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); 2174 2175 /* Min Input BPC for ICL+ is 8 */ 2176 if (pipe_bpp < 8 * 3) { 2177 drm_dbg_kms(&dev_priv->drm, 2178 "No DSC support for less than 8bpc\n"); 2179 return -EINVAL; 2180 } 2181 2182 /* 2183 * For now enable DSC for max bpp, max link rate, max lane count. 2184 * Optimize this later for the minimum possible link rate/lane count 2185 * with DSC enabled for the requested mode. 
2186 */ 2187 pipe_config->pipe_bpp = pipe_bpp; 2188 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 2189 pipe_config->lane_count = limits->max_lane_count; 2190 2191 if (intel_dp_is_edp(intel_dp)) { 2192 pipe_config->dsc.compressed_bpp = 2193 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 2194 pipe_config->pipe_bpp); 2195 pipe_config->dsc.slice_count = 2196 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 2197 true); 2198 } else { 2199 u16 dsc_max_output_bpp; 2200 u8 dsc_dp_slice_count; 2201 2202 dsc_max_output_bpp = 2203 intel_dp_dsc_get_output_bpp(dev_priv, 2204 pipe_config->port_clock, 2205 pipe_config->lane_count, 2206 adjusted_mode->crtc_clock, 2207 adjusted_mode->crtc_hdisplay); 2208 dsc_dp_slice_count = 2209 intel_dp_dsc_get_slice_count(intel_dp, 2210 adjusted_mode->crtc_clock, 2211 adjusted_mode->crtc_hdisplay); 2212 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 2213 drm_dbg_kms(&dev_priv->drm, 2214 "Compressed BPP/Slice Count not supported\n"); 2215 return -EINVAL; 2216 } 2217 pipe_config->dsc.compressed_bpp = min_t(u16, 2218 dsc_max_output_bpp >> 4, 2219 pipe_config->pipe_bpp); 2220 pipe_config->dsc.slice_count = dsc_dp_slice_count; 2221 } 2222 /* 2223 * The VDSC engine operates at 1 pixel per clock, so if the peak pixel 2224 * rate is greater than the maximum cdclk and the slice count is greater 2225 * than 1, we need to split the stream across 2 VDSC instances. 2226 */ 2227 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { 2228 if (pipe_config->dsc.slice_count > 1) { 2229 pipe_config->dsc.dsc_split = true; 2230 } else { 2231 drm_dbg_kms(&dev_priv->drm, 2232 "Cannot split stream to use 2 VDSC instances\n"); 2233 return -EINVAL; 2234 } 2235 } 2236 2237 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 2238 if (ret < 0) { 2239 drm_dbg_kms(&dev_priv->drm, 2240 "Cannot compute valid DSC parameters for Input Bpp = %d " 2241 "Compressed BPP = %d\n", 2242 pipe_config->pipe_bpp, 2243 pipe_config->dsc.compressed_bpp); 2244 return ret; 2245 } 2246 2247 pipe_config->dsc.compression_enable = true; 2248 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2249 "Compressed Bpp = %d Slice Count = %d\n", 2250 pipe_config->pipe_bpp, 2251 pipe_config->dsc.compressed_bpp, 2252 pipe_config->dsc.slice_count); 2253 2254 return 0; 2255 } 2256 2257 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) 2258 { 2259 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) 2260 return 6 * 3; 2261 else 2262 return 8 * 3; 2263 } 2264 2265 static int 2266 intel_dp_compute_link_config(struct intel_encoder *encoder, 2267 struct intel_crtc_state *pipe_config, 2268 struct drm_connector_state *conn_state) 2269 { 2270 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2271 const struct drm_display_mode *adjusted_mode = 2272 &pipe_config->hw.adjusted_mode; 2273 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2274 struct link_config_limits limits; 2275 int common_len; 2276 int ret; 2277 2278 common_len = intel_dp_common_len_rate_limit(intel_dp, 2279 intel_dp->max_link_rate); 2280 2281 /* No common link rates between source and sink */ 2282 drm_WARN_ON(encoder->base.dev, common_len <= 0); 2283 2284 limits.min_clock = 0; 2285 limits.max_clock = common_len - 1; 2286 2287 limits.min_lane_count = 1; 2288 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 2289 2290 limits.min_bpp = intel_dp_min_bpp(pipe_config); 2291 limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); 2292 2293 if (intel_dp_is_edp(intel_dp)) { 2294 /* 2295 * Use
the maximum clock and number of lanes the eDP panel 2296 * advertises being capable of. The panels are generally 2297 * designed to support only a single clock and lane 2298 * configuration, and typically these values correspond to the 2299 * native resolution of the panel. 2300 */ 2301 limits.min_lane_count = limits.max_lane_count; 2302 limits.min_clock = limits.max_clock; 2303 } 2304 2305 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 2306 2307 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 2308 "max rate %d max bpp %d pixel clock %iKHz\n", 2309 limits.max_lane_count, 2310 intel_dp->common_rates[limits.max_clock], 2311 limits.max_bpp, adjusted_mode->crtc_clock); 2312 2313 /* 2314 * Optimize for slow and wide. This is the place to add alternative 2315 * optimization policy. 2316 */ 2317 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 2318 2319 /* enable compression if the mode doesn't fit the available BW */ 2320 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 2321 if (ret || intel_dp->force_dsc_en) { 2322 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 2323 conn_state, &limits); 2324 if (ret < 0) 2325 return ret; 2326 } 2327 2328 if (pipe_config->dsc.compression_enable) { 2329 drm_dbg_kms(&i915->drm, 2330 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2331 pipe_config->lane_count, pipe_config->port_clock, 2332 pipe_config->pipe_bpp, 2333 pipe_config->dsc.compressed_bpp); 2334 2335 drm_dbg_kms(&i915->drm, 2336 "DP link rate required %i available %i\n", 2337 intel_dp_link_required(adjusted_mode->crtc_clock, 2338 pipe_config->dsc.compressed_bpp), 2339 intel_dp_max_data_rate(pipe_config->port_clock, 2340 pipe_config->lane_count)); 2341 } else { 2342 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 2343 pipe_config->lane_count, pipe_config->port_clock, 2344 pipe_config->pipe_bpp); 2345 2346 drm_dbg_kms(&i915->drm, 2347 "DP link rate required %i available %i\n", 2348 intel_dp_link_required(adjusted_mode->crtc_clock, 2349 pipe_config->pipe_bpp), 2350 intel_dp_max_data_rate(pipe_config->port_clock, 2351 pipe_config->lane_count)); 2352 } 2353 return 0; 2354 } 2355 2356 static int 2357 intel_dp_ycbcr420_config(struct intel_dp *intel_dp, 2358 struct intel_crtc_state *crtc_state, 2359 const struct drm_connector_state *conn_state) 2360 { 2361 struct drm_connector *connector = conn_state->connector; 2362 const struct drm_display_info *info = &connector->display_info; 2363 const struct drm_display_mode *adjusted_mode = 2364 &crtc_state->hw.adjusted_mode; 2365 2366 if (!drm_mode_is_420_only(info, adjusted_mode) || 2367 !intel_dp_get_colorimetry_status(intel_dp) || 2368 !connector->ycbcr_420_allowed) 2369 return 0; 2370 2371 crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2372 2373 return intel_pch_panel_fitting(crtc_state, conn_state); 2374 } 2375 2376 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 2377 const struct drm_connector_state *conn_state) 2378 { 2379 const struct intel_digital_connector_state *intel_conn_state = 2380 to_intel_digital_connector_state(conn_state); 2381 const struct drm_display_mode *adjusted_mode = 2382 &crtc_state->hw.adjusted_mode; 2383 2384 /* 2385 * Our YCbCr output is always limited range. 2386 * crtc_state->limited_color_range only applies to RGB, 2387 * and it must never be set for YCbCr or we risk setting 2388 * some conflicting bits in PIPECONF which will mess up 2389 * the colors on the monitor.
2390 */ 2391 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 2392 return false; 2393 2394 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 2395 /* 2396 * See: 2397 * CEA-861-E - 5.1 Default Encoding Parameters 2398 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 2399 */ 2400 return crtc_state->pipe_bpp != 18 && 2401 drm_default_rgb_quant_range(adjusted_mode) == 2402 HDMI_QUANTIZATION_RANGE_LIMITED; 2403 } else { 2404 return intel_conn_state->broadcast_rgb == 2405 INTEL_BROADCAST_RGB_LIMITED; 2406 } 2407 } 2408 2409 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 2410 enum port port) 2411 { 2412 if (IS_G4X(dev_priv)) 2413 return false; 2414 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A) 2415 return false; 2416 2417 return true; 2418 } 2419 2420 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 2421 const struct drm_connector_state *conn_state, 2422 struct drm_dp_vsc_sdp *vsc) 2423 { 2424 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2425 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2426 2427 /* 2428 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2429 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2430 * Colorimetry Format indication. 2431 */ 2432 vsc->revision = 0x5; 2433 vsc->length = 0x13; 2434 2435 /* DP 1.4a spec, Table 2-120 */ 2436 switch (crtc_state->output_format) { 2437 case INTEL_OUTPUT_FORMAT_YCBCR444: 2438 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 2439 break; 2440 case INTEL_OUTPUT_FORMAT_YCBCR420: 2441 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 2442 break; 2443 case INTEL_OUTPUT_FORMAT_RGB: 2444 default: 2445 vsc->pixelformat = DP_PIXELFORMAT_RGB; 2446 } 2447 2448 switch (conn_state->colorspace) { 2449 case DRM_MODE_COLORIMETRY_BT709_YCC: 2450 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2451 break; 2452 case DRM_MODE_COLORIMETRY_XVYCC_601: 2453 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 2454 break; 2455 case DRM_MODE_COLORIMETRY_XVYCC_709: 2456 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 2457 break; 2458 case DRM_MODE_COLORIMETRY_SYCC_601: 2459 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 2460 break; 2461 case DRM_MODE_COLORIMETRY_OPYCC_601: 2462 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 2463 break; 2464 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2465 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 2466 break; 2467 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2468 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 2469 break; 2470 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2471 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 2472 break; 2473 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 2474 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 2475 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 2476 break; 2477 default: 2478 /* 2479 * RGB->YCBCR color conversion uses the BT.709 2480 * color space. 
2481 */ 2482 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2483 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 2484 else 2485 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 2486 break; 2487 } 2488 2489 vsc->bpc = crtc_state->pipe_bpp / 3; 2490 2491 /* only RGB pixelformat supports 6 bpc */ 2492 drm_WARN_ON(&dev_priv->drm, 2493 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 2494 2495 /* all YCbCr are always limited range */ 2496 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 2497 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 2498 } 2499 2500 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 2501 struct intel_crtc_state *crtc_state, 2502 const struct drm_connector_state *conn_state) 2503 { 2504 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 2505 2506 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 2507 if (crtc_state->has_psr) 2508 return; 2509 2510 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 2511 return; 2512 2513 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 2514 vsc->sdp_type = DP_SDP_VSC; 2515 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2516 &crtc_state->infoframes.vsc); 2517 } 2518 2519 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 2520 const struct intel_crtc_state *crtc_state, 2521 const struct drm_connector_state *conn_state, 2522 struct drm_dp_vsc_sdp *vsc) 2523 { 2524 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2525 2526 vsc->sdp_type = DP_SDP_VSC; 2527 2528 if (dev_priv->psr.psr2_enabled) { 2529 if (dev_priv->psr.colorimetry_support && 2530 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2531 /* [PSR2, +Colorimetry] */ 2532 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2533 vsc); 2534 } else { 2535 /* 2536 * [PSR2, -Colorimetry] 2537 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 2538 * 3D stereo + PSR/PSR2 + Y-coordinate. 2539 */ 2540 vsc->revision = 0x4; 2541 vsc->length = 0xe; 2542 } 2543 } else { 2544 /* 2545 * [PSR1] 2546 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2547 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 2548 * higher). 
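 *
 * For reference, the VSC SDP header variants used by this function and
 * by intel_dp_compute_vsc_colorimetry() above are:
 *   revision 0x5, length 0x13: pixel encoding/colorimetry format (DP 1.4)
 *   revision 0x4, length 0xe:  PSR2 + Y-coordinate (eDP 1.4)
 *   revision 0x2, length 0x8:  PSR1 (eDP 1.3+)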
2549 */ 2550 vsc->revision = 0x2; 2551 vsc->length = 0x8; 2552 } 2553 } 2554 2555 static void 2556 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 2557 struct intel_crtc_state *crtc_state, 2558 const struct drm_connector_state *conn_state) 2559 { 2560 int ret; 2561 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2562 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 2563 2564 if (!conn_state->hdr_output_metadata) 2565 return; 2566 2567 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 2568 2569 if (ret) { 2570 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 2571 return; 2572 } 2573 2574 crtc_state->infoframes.enable |= 2575 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2576 } 2577 2578 int 2579 intel_dp_compute_config(struct intel_encoder *encoder, 2580 struct intel_crtc_state *pipe_config, 2581 struct drm_connector_state *conn_state) 2582 { 2583 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2584 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2585 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2586 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 2587 enum port port = encoder->port; 2588 struct intel_connector *intel_connector = intel_dp->attached_connector; 2589 struct intel_digital_connector_state *intel_conn_state = 2590 to_intel_digital_connector_state(conn_state); 2591 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, 2592 DP_DPCD_QUIRK_CONSTANT_N); 2593 int ret = 0, output_bpp; 2594 2595 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 2596 pipe_config->has_pch_encoder = true; 2597 2598 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2599 2600 if (lspcon->active) 2601 lspcon_ycbcr420_config(&intel_connector->base, pipe_config); 2602 else 2603 ret = intel_dp_ycbcr420_config(intel_dp, pipe_config, 2604 conn_state); 2605 if (ret) 2606 return ret; 2607 2608 pipe_config->has_drrs = false; 2609 if (!intel_dp_port_has_audio(dev_priv, port)) 2610 pipe_config->has_audio = false; 2611 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 2612 pipe_config->has_audio = intel_dp->has_audio; 2613 else 2614 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 2615 2616 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 2617 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 2618 adjusted_mode); 2619 2620 if (HAS_GMCH(dev_priv)) 2621 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 2622 else 2623 ret = intel_pch_panel_fitting(pipe_config, conn_state); 2624 if (ret) 2625 return ret; 2626 } 2627 2628 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 2629 return -EINVAL; 2630 2631 if (HAS_GMCH(dev_priv) && 2632 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 2633 return -EINVAL; 2634 2635 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2636 return -EINVAL; 2637 2638 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 2639 return -EINVAL; 2640 2641 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 2642 if (ret < 0) 2643 return ret; 2644 2645 pipe_config->limited_color_range = 2646 intel_dp_limited_color_range(pipe_config, conn_state); 2647 2648 if (pipe_config->dsc.compression_enable) 2649 output_bpp = pipe_config->dsc.compressed_bpp; 2650 else 2651 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); 2652 2653 intel_link_compute_m_n(output_bpp, 2654 pipe_config->lane_count, 2655 
adjusted_mode->crtc_clock, 2656 pipe_config->port_clock, 2657 &pipe_config->dp_m_n, 2658 constant_n, pipe_config->fec_enable); 2659 2660 if (intel_connector->panel.downclock_mode != NULL && 2661 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { 2662 pipe_config->has_drrs = true; 2663 intel_link_compute_m_n(output_bpp, 2664 pipe_config->lane_count, 2665 intel_connector->panel.downclock_mode->clock, 2666 pipe_config->port_clock, 2667 &pipe_config->dp_m2_n2, 2668 constant_n, pipe_config->fec_enable); 2669 } 2670 2671 if (!HAS_DDI(dev_priv)) 2672 intel_dp_set_clock(encoder, pipe_config); 2673 2674 intel_psr_compute_config(intel_dp, pipe_config); 2675 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2676 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2677 2678 return 0; 2679 } 2680 2681 void intel_dp_set_link_params(struct intel_dp *intel_dp, 2682 int link_rate, u8 lane_count, 2683 bool link_mst) 2684 { 2685 intel_dp->link_trained = false; 2686 intel_dp->link_rate = link_rate; 2687 intel_dp->lane_count = lane_count; 2688 intel_dp->link_mst = link_mst; 2689 } 2690 2691 static void intel_dp_prepare(struct intel_encoder *encoder, 2692 const struct intel_crtc_state *pipe_config) 2693 { 2694 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2695 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2696 enum port port = encoder->port; 2697 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2698 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2699 2700 intel_dp_set_link_params(intel_dp, pipe_config->port_clock, 2701 pipe_config->lane_count, 2702 intel_crtc_has_type(pipe_config, 2703 INTEL_OUTPUT_DP_MST)); 2704 2705 /* 2706 * There are four kinds of DP registers: 2707 * 2708 * IBX PCH 2709 * SNB CPU 2710 * IVB CPU 2711 * CPT PCH 2712 * 2713 * IBX PCH and CPU are the same for almost everything, 2714 * except that the CPU DP PLL is configured in this 2715 * register 2716 * 2717 * CPT PCH is quite different, having many bits moved 2718 * to the TRANS_DP_CTL register instead. That 2719 * configuration happens (oddly) in ilk_pch_enable 2720 */ 2721 2722 /* Preserve the BIOS-computed detected bit. This is 2723 * supposed to be read-only. 
2724 */ 2725 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; 2726 2727 /* Handle DP bits in common between all three register formats */ 2728 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 2729 intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); 2730 2731 /* Split out the IBX/CPU vs CPT settings */ 2732 2733 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { 2734 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2735 intel_dp->DP |= DP_SYNC_HS_HIGH; 2736 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2737 intel_dp->DP |= DP_SYNC_VS_HIGH; 2738 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2739 2740 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2741 intel_dp->DP |= DP_ENHANCED_FRAMING; 2742 2743 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 2744 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 2745 u32 trans_dp; 2746 2747 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 2748 2749 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 2750 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2751 trans_dp |= TRANS_DP_ENH_FRAMING; 2752 else 2753 trans_dp &= ~TRANS_DP_ENH_FRAMING; 2754 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 2755 } else { 2756 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 2757 intel_dp->DP |= DP_COLOR_RANGE_16_235; 2758 2759 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 2760 intel_dp->DP |= DP_SYNC_HS_HIGH; 2761 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 2762 intel_dp->DP |= DP_SYNC_VS_HIGH; 2763 intel_dp->DP |= DP_LINK_TRAIN_OFF; 2764 2765 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 2766 intel_dp->DP |= DP_ENHANCED_FRAMING; 2767 2768 if (IS_CHERRYVIEW(dev_priv)) 2769 intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); 2770 else 2771 intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); 2772 } 2773 } 2774 2775 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 2776 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 2777 2778 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 2779 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 2780 2781 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 2782 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 2783 2784 static void intel_pps_verify_state(struct intel_dp *intel_dp); 2785 2786 static void wait_panel_status(struct intel_dp *intel_dp, 2787 u32 mask, 2788 u32 value) 2789 { 2790 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2791 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2792 2793 lockdep_assert_held(&dev_priv->pps_mutex); 2794 2795 intel_pps_verify_state(intel_dp); 2796 2797 pp_stat_reg = _pp_stat_reg(intel_dp); 2798 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2799 2800 drm_dbg_kms(&dev_priv->drm, 2801 "mask %08x value %08x status %08x control %08x\n", 2802 mask, value, 2803 intel_de_read(dev_priv, pp_stat_reg), 2804 intel_de_read(dev_priv, pp_ctrl_reg)); 2805 2806 if (intel_de_wait_for_register(dev_priv, pp_stat_reg, 2807 mask, value, 5000)) 2808 drm_err(&dev_priv->drm, 2809 "Panel status timeout: status %08x control %08x\n", 2810 intel_de_read(dev_priv, pp_stat_reg), 2811 intel_de_read(dev_priv, pp_ctrl_reg)); 2812 2813 drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); 2814 } 2815 2816 static void wait_panel_on(struct intel_dp *intel_dp) 2817 { 2818 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2819 2820 drm_dbg_kms(&i915->drm, "Wait for panel power on\n"); 2821 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 2822 } 2823 
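/*
 * wait_panel_off() and wait_panel_power_cycle() below work like
 * wait_panel_on() above: poll PP_STATUS via wait_panel_status() until
 * (status & mask) == value for the corresponding IDLE_{OFF,CYCLE}_MASK
 * and IDLE_{OFF,CYCLE}_VALUE pair defined earlier.
 */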
2824 static void wait_panel_off(struct intel_dp *intel_dp) 2825 { 2826 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2827 2828 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n"); 2829 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 2830 } 2831 2832 static void wait_panel_power_cycle(struct intel_dp *intel_dp) 2833 { 2834 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2835 ktime_t panel_power_on_time; 2836 s64 panel_power_off_duration; 2837 2838 drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n"); 2839 2840 /* take the difference of the current time and the panel power off time 2841 * and then make the panel wait for t11_t12 if needed. */ 2842 panel_power_on_time = ktime_get_boottime(); 2843 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); 2844 2845 /* When we disable the VDD override bit last, we have to do the manual 2846 * wait. */ 2847 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) 2848 wait_remaining_ms_from_jiffies(jiffies, 2849 intel_dp->panel_power_cycle_delay - panel_power_off_duration); 2850 2851 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 2852 } 2853 2854 static void wait_backlight_on(struct intel_dp *intel_dp) 2855 { 2856 wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 2857 intel_dp->backlight_on_delay); 2858 } 2859 2860 static void edp_wait_backlight_off(struct intel_dp *intel_dp) 2861 { 2862 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 2863 intel_dp->backlight_off_delay); 2864 } 2865 2866 /* Read the current pp_control value, unlocking the register if it 2867 * is locked 2868 */ 2869 2870 static u32 ilk_get_pp_control(struct intel_dp *intel_dp) 2871 { 2872 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2873 u32 control; 2874 2875 lockdep_assert_held(&dev_priv->pps_mutex); 2876 2877 control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); 2878 if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && 2879 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { 2880 control &= ~PANEL_UNLOCK_MASK; 2881 control |= PANEL_UNLOCK_REGS; 2882 } 2883 return control; 2884 } 2885 2886 /* 2887 * Must be paired with edp_panel_vdd_off(). 2888 * Must hold pps_mutex around the whole on/off sequence. 2889 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
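 *
 * A sketch of the typical call pattern, mirroring intel_enable_dp()
 * later in this file (all under pps_mutex):
 *
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);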
2890 */ 2891 static bool edp_panel_vdd_on(struct intel_dp *intel_dp) 2892 { 2893 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2894 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2895 u32 pp; 2896 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2897 bool need_to_disable = !intel_dp->want_panel_vdd; 2898 2899 lockdep_assert_held(&dev_priv->pps_mutex); 2900 2901 if (!intel_dp_is_edp(intel_dp)) 2902 return false; 2903 2904 cancel_delayed_work(&intel_dp->panel_vdd_work); 2905 intel_dp->want_panel_vdd = true; 2906 2907 if (edp_have_panel_vdd(intel_dp)) 2908 return need_to_disable; 2909 2910 intel_display_power_get(dev_priv, 2911 intel_aux_power_domain(dig_port)); 2912 2913 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 2914 dig_port->base.base.base.id, 2915 dig_port->base.base.name); 2916 2917 if (!edp_have_panel_power(intel_dp)) 2918 wait_panel_power_cycle(intel_dp); 2919 2920 pp = ilk_get_pp_control(intel_dp); 2921 pp |= EDP_FORCE_VDD; 2922 2923 pp_stat_reg = _pp_stat_reg(intel_dp); 2924 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2925 2926 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2927 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2928 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2929 intel_de_read(dev_priv, pp_stat_reg), 2930 intel_de_read(dev_priv, pp_ctrl_reg)); 2931 /* 2932 * If the panel wasn't on, delay before accessing aux channel 2933 */ 2934 if (!edp_have_panel_power(intel_dp)) { 2935 drm_dbg_kms(&dev_priv->drm, 2936 "[ENCODER:%d:%s] panel power wasn't enabled\n", 2937 dig_port->base.base.base.id, 2938 dig_port->base.base.name); 2939 msleep(intel_dp->panel_power_up_delay); 2940 } 2941 2942 return need_to_disable; 2943 } 2944 2945 /* 2946 * Must be paired with intel_edp_panel_vdd_off() or 2947 * intel_edp_panel_off(). 2948 * Nested calls to these functions are not allowed since 2949 * we drop the lock. Caller must use some higher level 2950 * locking to prevent nested calls from other threads. 
2951 */ 2952 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 2953 { 2954 intel_wakeref_t wakeref; 2955 bool vdd; 2956 2957 if (!intel_dp_is_edp(intel_dp)) 2958 return; 2959 2960 vdd = false; 2961 with_pps_lock(intel_dp, wakeref) 2962 vdd = edp_panel_vdd_on(intel_dp); 2963 I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", 2964 dp_to_dig_port(intel_dp)->base.base.base.id, 2965 dp_to_dig_port(intel_dp)->base.base.name); 2966 } 2967 2968 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 2969 { 2970 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2971 struct intel_digital_port *dig_port = 2972 dp_to_dig_port(intel_dp); 2973 u32 pp; 2974 i915_reg_t pp_stat_reg, pp_ctrl_reg; 2975 2976 lockdep_assert_held(&dev_priv->pps_mutex); 2977 2978 drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); 2979 2980 if (!edp_have_panel_vdd(intel_dp)) 2981 return; 2982 2983 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", 2984 dig_port->base.base.base.id, 2985 dig_port->base.base.name); 2986 2987 pp = ilk_get_pp_control(intel_dp); 2988 pp &= ~EDP_FORCE_VDD; 2989 2990 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 2991 pp_stat_reg = _pp_stat_reg(intel_dp); 2992 2993 intel_de_write(dev_priv, pp_ctrl_reg, pp); 2994 intel_de_posting_read(dev_priv, pp_ctrl_reg); 2995 2996 /* Make sure sequencer is idle before allowing subsequent activity */ 2997 drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 2998 intel_de_read(dev_priv, pp_stat_reg), 2999 intel_de_read(dev_priv, pp_ctrl_reg)); 3000 3001 if ((pp & PANEL_POWER_ON) == 0) 3002 intel_dp->panel_power_off_time = ktime_get_boottime(); 3003 3004 intel_display_power_put_unchecked(dev_priv, 3005 intel_aux_power_domain(dig_port)); 3006 } 3007 3008 static void edp_panel_vdd_work(struct work_struct *__work) 3009 { 3010 struct intel_dp *intel_dp = 3011 container_of(to_delayed_work(__work), 3012 struct intel_dp, panel_vdd_work); 3013 intel_wakeref_t wakeref; 3014 3015 with_pps_lock(intel_dp, wakeref) { 3016 if (!intel_dp->want_panel_vdd) 3017 edp_panel_vdd_off_sync(intel_dp); 3018 } 3019 } 3020 3021 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 3022 { 3023 unsigned long delay; 3024 3025 /* 3026 * Queue the timer to fire a long time from now (relative to the power 3027 * down delay) to keep the panel power up across a sequence of 3028 * operations. 3029 */ 3030 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); 3031 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 3032 } 3033 3034 /* 3035 * Must be paired with edp_panel_vdd_on(). 3036 * Must hold pps_mutex around the whole on/off sequence. 3037 * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
3038 */ 3039 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 3040 { 3041 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3042 3043 lockdep_assert_held(&dev_priv->pps_mutex); 3044 3045 if (!intel_dp_is_edp(intel_dp)) 3046 return; 3047 3048 I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", 3049 dp_to_dig_port(intel_dp)->base.base.base.id, 3050 dp_to_dig_port(intel_dp)->base.base.name); 3051 3052 intel_dp->want_panel_vdd = false; 3053 3054 if (sync) 3055 edp_panel_vdd_off_sync(intel_dp); 3056 else 3057 edp_panel_vdd_schedule_off(intel_dp); 3058 } 3059 3060 static void edp_panel_on(struct intel_dp *intel_dp) 3061 { 3062 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3063 u32 pp; 3064 i915_reg_t pp_ctrl_reg; 3065 3066 lockdep_assert_held(&dev_priv->pps_mutex); 3067 3068 if (!intel_dp_is_edp(intel_dp)) 3069 return; 3070 3071 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", 3072 dp_to_dig_port(intel_dp)->base.base.base.id, 3073 dp_to_dig_port(intel_dp)->base.base.name); 3074 3075 if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), 3076 "[ENCODER:%d:%s] panel power already on\n", 3077 dp_to_dig_port(intel_dp)->base.base.base.id, 3078 dp_to_dig_port(intel_dp)->base.base.name)) 3079 return; 3080 3081 wait_panel_power_cycle(intel_dp); 3082 3083 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3084 pp = ilk_get_pp_control(intel_dp); 3085 if (IS_GEN(dev_priv, 5)) { 3086 /* ILK workaround: disable reset around power sequence */ 3087 pp &= ~PANEL_POWER_RESET; 3088 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3089 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3090 } 3091 3092 pp |= PANEL_POWER_ON; 3093 if (!IS_GEN(dev_priv, 5)) 3094 pp |= PANEL_POWER_RESET; 3095 3096 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3097 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3098 3099 wait_panel_on(intel_dp); 3100 intel_dp->last_power_on = jiffies; 3101 3102 if (IS_GEN(dev_priv, 5)) { 3103 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 3104 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3105 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3106 } 3107 } 3108 3109 void intel_edp_panel_on(struct intel_dp *intel_dp) 3110 { 3111 intel_wakeref_t wakeref; 3112 3113 if (!intel_dp_is_edp(intel_dp)) 3114 return; 3115 3116 with_pps_lock(intel_dp, wakeref) 3117 edp_panel_on(intel_dp); 3118 } 3119 3120 3121 static void edp_panel_off(struct intel_dp *intel_dp) 3122 { 3123 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3124 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3125 u32 pp; 3126 i915_reg_t pp_ctrl_reg; 3127 3128 lockdep_assert_held(&dev_priv->pps_mutex); 3129 3130 if (!intel_dp_is_edp(intel_dp)) 3131 return; 3132 3133 drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", 3134 dig_port->base.base.base.id, dig_port->base.base.name); 3135 3136 drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, 3137 "Need [ENCODER:%d:%s] VDD to turn off panel\n", 3138 dig_port->base.base.base.id, dig_port->base.base.name); 3139 3140 pp = ilk_get_pp_control(intel_dp); 3141 /* We need to switch off panel power _and_ force vdd, for otherwise some 3142 * panels get very unhappy and cease to work. 
*/ 3143 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | 3144 EDP_BLC_ENABLE); 3145 3146 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3147 3148 intel_dp->want_panel_vdd = false; 3149 3150 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3151 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3152 3153 wait_panel_off(intel_dp); 3154 intel_dp->panel_power_off_time = ktime_get_boottime(); 3155 3156 /* We got a reference when we enabled the VDD. */ 3157 intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3158 } 3159 3160 void intel_edp_panel_off(struct intel_dp *intel_dp) 3161 { 3162 intel_wakeref_t wakeref; 3163 3164 if (!intel_dp_is_edp(intel_dp)) 3165 return; 3166 3167 with_pps_lock(intel_dp, wakeref) 3168 edp_panel_off(intel_dp); 3169 } 3170 3171 /* Enable backlight in the panel power control. */ 3172 static void _intel_edp_backlight_on(struct intel_dp *intel_dp) 3173 { 3174 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3175 intel_wakeref_t wakeref; 3176 3177 /* 3178 * If we enable the backlight right away following a panel power 3179 * on, we may see slight flicker as the panel syncs with the eDP 3180 * link. So delay a bit to make sure the image is solid before 3181 * allowing it to appear. 3182 */ 3183 wait_backlight_on(intel_dp); 3184 3185 with_pps_lock(intel_dp, wakeref) { 3186 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3187 u32 pp; 3188 3189 pp = ilk_get_pp_control(intel_dp); 3190 pp |= EDP_BLC_ENABLE; 3191 3192 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3193 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3194 } 3195 } 3196 3197 /* Enable backlight PWM and backlight PP control. */ 3198 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 3199 const struct drm_connector_state *conn_state) 3200 { 3201 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 3202 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3203 3204 if (!intel_dp_is_edp(intel_dp)) 3205 return; 3206 3207 drm_dbg_kms(&i915->drm, "\n"); 3208 3209 intel_panel_enable_backlight(crtc_state, conn_state); 3210 _intel_edp_backlight_on(intel_dp); 3211 } 3212 3213 /* Disable backlight in the panel power control. */ 3214 static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 3215 { 3216 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3217 intel_wakeref_t wakeref; 3218 3219 if (!intel_dp_is_edp(intel_dp)) 3220 return; 3221 3222 with_pps_lock(intel_dp, wakeref) { 3223 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 3224 u32 pp; 3225 3226 pp = ilk_get_pp_control(intel_dp); 3227 pp &= ~EDP_BLC_ENABLE; 3228 3229 intel_de_write(dev_priv, pp_ctrl_reg, pp); 3230 intel_de_posting_read(dev_priv, pp_ctrl_reg); 3231 } 3232 3233 intel_dp->last_backlight_off = jiffies; 3234 edp_wait_backlight_off(intel_dp); 3235 } 3236 3237 /* Disable backlight PP control and backlight PWM. */ 3238 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 3239 { 3240 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 3241 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3242 3243 if (!intel_dp_is_edp(intel_dp)) 3244 return; 3245 3246 drm_dbg_kms(&i915->drm, "\n"); 3247 3248 _intel_edp_backlight_off(intel_dp); 3249 intel_panel_disable_backlight(old_conn_state); 3250 } 3251 3252 /* 3253 * Hook for controlling the panel power control backlight through the bl_power 3254 * sysfs attribute. Take care to handle multiple calls. 
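 * (requests to enable an already-enabled backlight, or to disable an
 * already-disabled one, are turned into no-ops by the is_enabled check
 * below).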
3255 */ 3256 static void intel_edp_backlight_power(struct intel_connector *connector, 3257 bool enable) 3258 { 3259 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3260 struct intel_dp *intel_dp = intel_attached_dp(connector); 3261 intel_wakeref_t wakeref; 3262 bool is_enabled; 3263 3264 is_enabled = false; 3265 with_pps_lock(intel_dp, wakeref) 3266 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE; 3267 if (is_enabled == enable) 3268 return; 3269 3270 drm_dbg_kms(&i915->drm, "panel power control backlight %s\n", 3271 enable ? "enable" : "disable"); 3272 3273 if (enable) 3274 _intel_edp_backlight_on(intel_dp); 3275 else 3276 _intel_edp_backlight_off(intel_dp); 3277 } 3278 3279 static void assert_dp_port(struct intel_dp *intel_dp, bool state) 3280 { 3281 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3282 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3283 bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; 3284 3285 I915_STATE_WARN(cur_state != state, 3286 "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", 3287 dig_port->base.base.base.id, dig_port->base.base.name, 3288 onoff(state), onoff(cur_state)); 3289 } 3290 #define assert_dp_port_disabled(d) assert_dp_port((d), false) 3291 3292 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) 3293 { 3294 bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; 3295 3296 I915_STATE_WARN(cur_state != state, 3297 "eDP PLL state assertion failure (expected %s, current %s)\n", 3298 onoff(state), onoff(cur_state)); 3299 } 3300 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true) 3301 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) 3302 3303 static void ilk_edp_pll_on(struct intel_dp *intel_dp, 3304 const struct intel_crtc_state *pipe_config) 3305 { 3306 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3308 3309 assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); 3310 assert_dp_port_disabled(intel_dp); 3311 assert_edp_pll_disabled(dev_priv); 3312 3313 drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", 3314 pipe_config->port_clock); 3315 3316 intel_dp->DP &= ~DP_PLL_FREQ_MASK; 3317 3318 if (pipe_config->port_clock == 162000) 3319 intel_dp->DP |= DP_PLL_FREQ_162MHZ; 3320 else 3321 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 3322 3323 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3324 intel_de_posting_read(dev_priv, DP_A); 3325 udelay(500); 3326 3327 /* 3328 * [DevILK] Work around required when enabling DP PLL 3329 * while a pipe is enabled going to FDI: 3330 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI 3331 * 2. 
Program DP PLL enable 3332 */ 3333 if (IS_GEN(dev_priv, 5)) 3334 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); 3335 3336 intel_dp->DP |= DP_PLL_ENABLE; 3337 3338 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3339 intel_de_posting_read(dev_priv, DP_A); 3340 udelay(200); 3341 } 3342 3343 static void ilk_edp_pll_off(struct intel_dp *intel_dp, 3344 const struct intel_crtc_state *old_crtc_state) 3345 { 3346 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 3347 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3348 3349 assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); 3350 assert_dp_port_disabled(intel_dp); 3351 assert_edp_pll_enabled(dev_priv); 3352 3353 drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); 3354 3355 intel_dp->DP &= ~DP_PLL_ENABLE; 3356 3357 intel_de_write(dev_priv, DP_A, intel_dp->DP); 3358 intel_de_posting_read(dev_priv, DP_A); 3359 udelay(200); 3360 } 3361 3362 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 3363 { 3364 /* 3365 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 3366 * be capable of signalling downstream hpd with a long pulse. 3367 * Whether or not that means D3 is safe to use is not clear, 3368 * but let's assume so until proven otherwise. 3369 * 3370 * FIXME should really check all downstream ports... 3371 */ 3372 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 3373 drm_dp_is_branch(intel_dp->dpcd) && 3374 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3375 } 3376 3377 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3378 const struct intel_crtc_state *crtc_state, 3379 bool enable) 3380 { 3381 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3382 int ret; 3383 3384 if (!crtc_state->dsc.compression_enable) 3385 return; 3386 3387 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3388 enable ? DP_DECOMPRESSION_EN : 0); 3389 if (ret < 0) 3390 drm_dbg_kms(&i915->drm, 3391 "Failed to %s sink decompression state\n", 3392 enable ? "enable" : "disable"); 3393 } 3394 3395 /* If the sink supports it, try to set the power state appropriately */ 3396 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 3397 { 3398 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3399 int ret, i; 3400 3401 /* Should have a valid DPCD by this point */ 3402 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3403 return; 3404 3405 if (mode != DRM_MODE_DPMS_ON) { 3406 if (downstream_hpd_needs_d0(intel_dp)) 3407 return; 3408 3409 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3410 DP_SET_POWER_D3); 3411 } else { 3412 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3413 3414 /* 3415 * When turning on, we need to retry a few times, with a ~1 ms 3416 * delay between attempts, to give the sink time to wake up. 3417 */ 3418 for (i = 0; i < 3; i++) { 3419 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 3420 DP_SET_POWER_D0); 3421 if (ret == 1) 3422 break; 3423 msleep(1); 3424 } 3425 3426 if (ret == 1 && lspcon->active) 3427 lspcon_wait_pcon_mode(lspcon); 3428 } 3429 3430 if (ret != 1) 3431 drm_dbg_kms(&i915->drm, "failed to %s sink power state\n", 3432 mode == DRM_MODE_DPMS_ON ?
"enable" : "disable"); 3433 } 3434 3435 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, 3436 enum port port, enum pipe *pipe) 3437 { 3438 enum pipe p; 3439 3440 for_each_pipe(dev_priv, p) { 3441 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); 3442 3443 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { 3444 *pipe = p; 3445 return true; 3446 } 3447 } 3448 3449 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", 3450 port_name(port)); 3451 3452 /* must initialize pipe to something for the asserts */ 3453 *pipe = PIPE_A; 3454 3455 return false; 3456 } 3457 3458 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, 3459 i915_reg_t dp_reg, enum port port, 3460 enum pipe *pipe) 3461 { 3462 bool ret; 3463 u32 val; 3464 3465 val = intel_de_read(dev_priv, dp_reg); 3466 3467 ret = val & DP_PORT_EN; 3468 3469 /* asserts want to know the pipe even if the port is disabled */ 3470 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) 3471 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; 3472 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) 3473 ret &= cpt_dp_port_selected(dev_priv, port, pipe); 3474 else if (IS_CHERRYVIEW(dev_priv)) 3475 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; 3476 else 3477 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; 3478 3479 return ret; 3480 } 3481 3482 static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 3483 enum pipe *pipe) 3484 { 3485 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3486 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3487 intel_wakeref_t wakeref; 3488 bool ret; 3489 3490 wakeref = intel_display_power_get_if_enabled(dev_priv, 3491 encoder->power_domain); 3492 if (!wakeref) 3493 return false; 3494 3495 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 3496 encoder->port, pipe); 3497 3498 intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 3499 3500 return ret; 3501 } 3502 3503 static void intel_dp_get_config(struct intel_encoder *encoder, 3504 struct intel_crtc_state *pipe_config) 3505 { 3506 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3507 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3508 u32 tmp, flags = 0; 3509 enum port port = encoder->port; 3510 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3511 3512 if (encoder->type == INTEL_OUTPUT_EDP) 3513 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); 3514 else 3515 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); 3516 3517 tmp = intel_de_read(dev_priv, intel_dp->output_reg); 3518 3519 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; 3520 3521 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 3522 u32 trans_dp = intel_de_read(dev_priv, 3523 TRANS_DP_CTL(crtc->pipe)); 3524 3525 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 3526 flags |= DRM_MODE_FLAG_PHSYNC; 3527 else 3528 flags |= DRM_MODE_FLAG_NHSYNC; 3529 3530 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) 3531 flags |= DRM_MODE_FLAG_PVSYNC; 3532 else 3533 flags |= DRM_MODE_FLAG_NVSYNC; 3534 } else { 3535 if (tmp & DP_SYNC_HS_HIGH) 3536 flags |= DRM_MODE_FLAG_PHSYNC; 3537 else 3538 flags |= DRM_MODE_FLAG_NHSYNC; 3539 3540 if (tmp & DP_SYNC_VS_HIGH) 3541 flags |= DRM_MODE_FLAG_PVSYNC; 3542 else 3543 flags |= DRM_MODE_FLAG_NVSYNC; 3544 } 3545 3546 pipe_config->hw.adjusted_mode.flags |= flags; 3547 3548 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) 3549 pipe_config->limited_color_range = true; 3550 3551 pipe_config->lane_count = 3552 ((tmp & 
DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 3553 3554 intel_dp_get_m_n(crtc, pipe_config); 3555 3556 if (port == PORT_A) { 3557 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) 3558 pipe_config->port_clock = 162000; 3559 else 3560 pipe_config->port_clock = 270000; 3561 } 3562 3563 pipe_config->hw.adjusted_mode.crtc_clock = 3564 intel_dotclock_calculate(pipe_config->port_clock, 3565 &pipe_config->dp_m_n); 3566 3567 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && 3568 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 3569 /* 3570 * This is a big fat ugly hack. 3571 * 3572 * Some machines in UEFI boot mode provide us a VBT that has 18 3573 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons 3574 * unknown we fail to light up. Yet the same BIOS boots up with 3575 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as 3576 * max, not what it tells us to use. 3577 * 3578 * Note: This will still be broken if the eDP panel is not lit 3579 * up by the BIOS, and thus we can't get the mode at module 3580 * load. 3581 */ 3582 drm_dbg_kms(&dev_priv->drm, 3583 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", 3584 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); 3585 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3586 } 3587 } 3588 3589 static void intel_disable_dp(struct intel_atomic_state *state, 3590 struct intel_encoder *encoder, 3591 const struct intel_crtc_state *old_crtc_state, 3592 const struct drm_connector_state *old_conn_state) 3593 { 3594 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3595 3596 intel_dp->link_trained = false; 3597 3598 if (old_crtc_state->has_audio) 3599 intel_audio_codec_disable(encoder, 3600 old_crtc_state, old_conn_state); 3601 3602 /* Make sure the panel is off before trying to change the mode. But also 3603 * ensure that we have vdd while we switch off the panel. */ 3604 intel_edp_panel_vdd_on(intel_dp); 3605 intel_edp_backlight_off(old_conn_state); 3606 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 3607 intel_edp_panel_off(intel_dp); 3608 } 3609 3610 static void g4x_disable_dp(struct intel_atomic_state *state, 3611 struct intel_encoder *encoder, 3612 const struct intel_crtc_state *old_crtc_state, 3613 const struct drm_connector_state *old_conn_state) 3614 { 3615 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3616 } 3617 3618 static void vlv_disable_dp(struct intel_atomic_state *state, 3619 struct intel_encoder *encoder, 3620 const struct intel_crtc_state *old_crtc_state, 3621 const struct drm_connector_state *old_conn_state) 3622 { 3623 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state); 3624 } 3625 3626 static void g4x_post_disable_dp(struct intel_atomic_state *state, 3627 struct intel_encoder *encoder, 3628 const struct intel_crtc_state *old_crtc_state, 3629 const struct drm_connector_state *old_conn_state) 3630 { 3631 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3632 enum port port = encoder->port; 3633 3634 /* 3635 * Bspec does not list a specific disable sequence for g4x DP. 3636 * Follow the ilk+ sequence (disable pipe before the port) for 3637 * g4x DP as it does not suffer from underruns like the normal 3638 * g4x modeset sequence (disable pipe after the port). 
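 *
 * (by the time this hook runs the pipe has therefore already been
 * disabled; intel_dp_link_down() below turns the port itself off).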
3639 */ 3640 intel_dp_link_down(encoder, old_crtc_state); 3641 3642 /* Only ilk+ has port A */ 3643 if (port == PORT_A) 3644 ilk_edp_pll_off(intel_dp, old_crtc_state); 3645 } 3646 3647 static void vlv_post_disable_dp(struct intel_atomic_state *state, 3648 struct intel_encoder *encoder, 3649 const struct intel_crtc_state *old_crtc_state, 3650 const struct drm_connector_state *old_conn_state) 3651 { 3652 intel_dp_link_down(encoder, old_crtc_state); 3653 } 3654 3655 static void chv_post_disable_dp(struct intel_atomic_state *state, 3656 struct intel_encoder *encoder, 3657 const struct intel_crtc_state *old_crtc_state, 3658 const struct drm_connector_state *old_conn_state) 3659 { 3660 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3661 3662 intel_dp_link_down(encoder, old_crtc_state); 3663 3664 vlv_dpio_get(dev_priv); 3665 3666 /* Assert data lane reset */ 3667 chv_data_lane_soft_reset(encoder, old_crtc_state, true); 3668 3669 vlv_dpio_put(dev_priv); 3670 } 3671 3672 static void 3673 cpt_set_link_train(struct intel_dp *intel_dp, 3674 u8 dp_train_pat) 3675 { 3676 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3677 u32 *DP = &intel_dp->DP; 3678 3679 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 3680 3681 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3682 case DP_TRAINING_PATTERN_DISABLE: 3683 *DP |= DP_LINK_TRAIN_OFF_CPT; 3684 break; 3685 case DP_TRAINING_PATTERN_1: 3686 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 3687 break; 3688 case DP_TRAINING_PATTERN_2: 3689 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3690 break; 3691 case DP_TRAINING_PATTERN_3: 3692 drm_dbg_kms(&dev_priv->drm, 3693 "TPS3 not supported, using TPS2 instead\n"); 3694 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 3695 break; 3696 } 3697 3698 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3699 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3700 } 3701 3702 static void 3703 g4x_set_link_train(struct intel_dp *intel_dp, 3704 u8 dp_train_pat) 3705 { 3706 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3707 u32 *DP = &intel_dp->DP; 3708 3709 *DP &= ~DP_LINK_TRAIN_MASK; 3710 3711 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 3712 case DP_TRAINING_PATTERN_DISABLE: 3713 *DP |= DP_LINK_TRAIN_OFF; 3714 break; 3715 case DP_TRAINING_PATTERN_1: 3716 *DP |= DP_LINK_TRAIN_PAT_1; 3717 break; 3718 case DP_TRAINING_PATTERN_2: 3719 *DP |= DP_LINK_TRAIN_PAT_2; 3720 break; 3721 case DP_TRAINING_PATTERN_3: 3722 drm_dbg_kms(&dev_priv->drm, 3723 "TPS3 not supported, using TPS2 instead\n"); 3724 *DP |= DP_LINK_TRAIN_PAT_2; 3725 break; 3726 } 3727 3728 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3729 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3730 } 3731 3732 static void intel_dp_enable_port(struct intel_dp *intel_dp, 3733 const struct intel_crtc_state *old_crtc_state) 3734 { 3735 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3736 3737 /* enable with pattern 1 (as per spec) */ 3738 3739 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); 3740 3741 /* 3742 * Magic for VLV/CHV. We _must_ first set up the register 3743 * without actually enabling the port, and then do another 3744 * write to enable the port. Otherwise link training will 3745 * fail when the power sequencer is freshly used for this port. 
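 *
 * As a rough sketch, the resulting sequence is:
 *
 *   intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);  <- port still off
 *   intel_dp->DP |= DP_PORT_EN;
 *   intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);  <- this one turns it on
 *
 * The first write happens via intel_dp_program_link_training_pattern()
 * above; the second one follows just below.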
3746 */ 3747 intel_dp->DP |= DP_PORT_EN; 3748 if (old_crtc_state->has_audio) 3749 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 3750 3751 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 3752 intel_de_posting_read(dev_priv, intel_dp->output_reg); 3753 } 3754 3755 static void intel_enable_dp(struct intel_atomic_state *state, 3756 struct intel_encoder *encoder, 3757 const struct intel_crtc_state *pipe_config, 3758 const struct drm_connector_state *conn_state) 3759 { 3760 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3761 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3762 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 3763 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); 3764 enum pipe pipe = crtc->pipe; 3765 intel_wakeref_t wakeref; 3766 3767 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) 3768 return; 3769 3770 with_pps_lock(intel_dp, wakeref) { 3771 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3772 vlv_init_panel_power_sequencer(encoder, pipe_config); 3773 3774 intel_dp_enable_port(intel_dp, pipe_config); 3775 3776 edp_panel_vdd_on(intel_dp); 3777 edp_panel_on(intel_dp); 3778 edp_panel_vdd_off(intel_dp, true); 3779 } 3780 3781 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 3782 unsigned int lane_mask = 0x0; 3783 3784 if (IS_CHERRYVIEW(dev_priv)) 3785 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); 3786 3787 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), 3788 lane_mask); 3789 } 3790 3791 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 3792 intel_dp_start_link_train(intel_dp); 3793 intel_dp_stop_link_train(intel_dp); 3794 3795 if (pipe_config->has_audio) { 3796 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", 3797 pipe_name(pipe)); 3798 intel_audio_codec_enable(encoder, pipe_config, conn_state); 3799 } 3800 } 3801 3802 static void g4x_enable_dp(struct intel_atomic_state *state, 3803 struct intel_encoder *encoder, 3804 const struct intel_crtc_state *pipe_config, 3805 const struct drm_connector_state *conn_state) 3806 { 3807 intel_enable_dp(state, encoder, pipe_config, conn_state); 3808 intel_edp_backlight_on(pipe_config, conn_state); 3809 } 3810 3811 static void vlv_enable_dp(struct intel_atomic_state *state, 3812 struct intel_encoder *encoder, 3813 const struct intel_crtc_state *pipe_config, 3814 const struct drm_connector_state *conn_state) 3815 { 3816 intel_edp_backlight_on(pipe_config, conn_state); 3817 } 3818 3819 static void g4x_pre_enable_dp(struct intel_atomic_state *state, 3820 struct intel_encoder *encoder, 3821 const struct intel_crtc_state *pipe_config, 3822 const struct drm_connector_state *conn_state) 3823 { 3824 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3825 enum port port = encoder->port; 3826 3827 intel_dp_prepare(encoder, pipe_config); 3828 3829 /* Only ilk+ has port A */ 3830 if (port == PORT_A) 3831 ilk_edp_pll_on(intel_dp, pipe_config); 3832 } 3833 3834 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 3835 { 3836 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3837 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 3838 enum pipe pipe = intel_dp->pps_pipe; 3839 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); 3840 3841 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3842 3843 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) 3844 return; 3845 3846 edp_panel_vdd_off_sync(intel_dp); 3847 3848 /* 3849 * VLV seems to get confused when multiple power sequencers 
* have the same port selected (even if only one has power/vdd 3851 * enabled). The failure manifests as vlv_wait_port_ready() failing. 3852 * CHV, on the other hand, doesn't seem to mind having the same port 3853 * selected in multiple power sequencers, but let's always clear the 3854 * port select when logically disconnecting a power sequencer 3855 * from a port. 3856 */ 3857 drm_dbg_kms(&dev_priv->drm, 3858 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", 3859 pipe_name(pipe), dig_port->base.base.base.id, 3860 dig_port->base.base.name); 3861 intel_de_write(dev_priv, pp_on_reg, 0); 3862 intel_de_posting_read(dev_priv, pp_on_reg); 3863 3864 intel_dp->pps_pipe = INVALID_PIPE; 3865 } 3866 3867 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, 3868 enum pipe pipe) 3869 { 3870 struct intel_encoder *encoder; 3871 3872 lockdep_assert_held(&dev_priv->pps_mutex); 3873 3874 for_each_intel_dp(&dev_priv->drm, encoder) { 3875 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3876 3877 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe, 3878 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", 3879 pipe_name(pipe), encoder->base.base.id, 3880 encoder->base.name); 3881 3882 if (intel_dp->pps_pipe != pipe) 3883 continue; 3884 3885 drm_dbg_kms(&dev_priv->drm, 3886 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", 3887 pipe_name(pipe), encoder->base.base.id, 3888 encoder->base.name); 3889 3890 /* make sure vdd is off before we steal it */ 3891 vlv_detach_power_sequencer(intel_dp); 3892 } 3893 } 3894 3895 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, 3896 const struct intel_crtc_state *crtc_state) 3897 { 3898 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3899 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3900 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3901 3902 lockdep_assert_held(&dev_priv->pps_mutex); 3903 3904 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); 3905 3906 if (intel_dp->pps_pipe != INVALID_PIPE && 3907 intel_dp->pps_pipe != crtc->pipe) { 3908 /* 3909 * If another power sequencer was being used on this 3910 * port previously make sure to turn off vdd there while 3911 * we still have control of it. 3912 */ 3913 vlv_detach_power_sequencer(intel_dp); 3914 } 3915 3916 /* 3917 * We may be stealing the power 3918 * sequencer from another port.
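 * If we are, vlv_steal_power_sequencer() below will WARN when the pipe
 * is still actively driving another encoder, and will detach the
 * sequencer from whichever encoder still holds it before we claim it.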
3919 */ 3920 vlv_steal_power_sequencer(dev_priv, crtc->pipe); 3921 3922 intel_dp->active_pipe = crtc->pipe; 3923 3924 if (!intel_dp_is_edp(intel_dp)) 3925 return; 3926 3927 /* now it's all ours */ 3928 intel_dp->pps_pipe = crtc->pipe; 3929 3930 drm_dbg_kms(&dev_priv->drm, 3931 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", 3932 pipe_name(intel_dp->pps_pipe), encoder->base.base.id, 3933 encoder->base.name); 3934 3935 /* init power sequencer on this pipe and port */ 3936 intel_dp_init_panel_power_sequencer(intel_dp); 3937 intel_dp_init_panel_power_sequencer_registers(intel_dp, true); 3938 } 3939 3940 static void vlv_pre_enable_dp(struct intel_atomic_state *state, 3941 struct intel_encoder *encoder, 3942 const struct intel_crtc_state *pipe_config, 3943 const struct drm_connector_state *conn_state) 3944 { 3945 vlv_phy_pre_encoder_enable(encoder, pipe_config); 3946 3947 intel_enable_dp(state, encoder, pipe_config, conn_state); 3948 } 3949 3950 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, 3951 struct intel_encoder *encoder, 3952 const struct intel_crtc_state *pipe_config, 3953 const struct drm_connector_state *conn_state) 3954 { 3955 intel_dp_prepare(encoder, pipe_config); 3956 3957 vlv_phy_pre_pll_enable(encoder, pipe_config); 3958 } 3959 3960 static void chv_pre_enable_dp(struct intel_atomic_state *state, 3961 struct intel_encoder *encoder, 3962 const struct intel_crtc_state *pipe_config, 3963 const struct drm_connector_state *conn_state) 3964 { 3965 chv_phy_pre_encoder_enable(encoder, pipe_config); 3966 3967 intel_enable_dp(state, encoder, pipe_config, conn_state); 3968 3969 /* Second common lane will stay alive on its own now */ 3970 chv_phy_release_cl2_override(encoder); 3971 } 3972 3973 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, 3974 struct intel_encoder *encoder, 3975 const struct intel_crtc_state *pipe_config, 3976 const struct drm_connector_state *conn_state) 3977 { 3978 intel_dp_prepare(encoder, pipe_config); 3979 3980 chv_phy_pre_pll_enable(encoder, pipe_config); 3981 } 3982 3983 static void chv_dp_post_pll_disable(struct intel_atomic_state *state, 3984 struct intel_encoder *encoder, 3985 const struct intel_crtc_state *old_crtc_state, 3986 const struct drm_connector_state *old_conn_state) 3987 { 3988 chv_phy_post_pll_disable(encoder, old_crtc_state); 3989 } 3990 3991 /* 3992 * Fetch AUX CH registers 0x202 - 0x207 which contain 3993 * link status information 3994 */ 3995 bool 3996 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) 3997 { 3998 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, 3999 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; 4000 } 4001 4002 static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp) 4003 { 4004 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 4005 } 4006 4007 static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp) 4008 { 4009 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 4010 } 4011 4012 static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp) 4013 { 4014 return DP_TRAIN_PRE_EMPH_LEVEL_2; 4015 } 4016 4017 static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp) 4018 { 4019 return DP_TRAIN_PRE_EMPH_LEVEL_3; 4020 } 4021 4022 static void vlv_set_signal_levels(struct intel_dp *intel_dp) 4023 { 4024 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4025 unsigned long demph_reg_value, preemph_reg_value, 4026 uniqtranscale_reg_value; 4027 u8 train_set = intel_dp->train_set[0]; 4028 4029 switch (train_set & 
DP_TRAIN_PRE_EMPHASIS_MASK) { 4030 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4031 preemph_reg_value = 0x0004000; 4032 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4034 demph_reg_value = 0x2B405555; 4035 uniqtranscale_reg_value = 0x552AB83A; 4036 break; 4037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4038 demph_reg_value = 0x2B404040; 4039 uniqtranscale_reg_value = 0x5548B83A; 4040 break; 4041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4042 demph_reg_value = 0x2B245555; 4043 uniqtranscale_reg_value = 0x5560B83A; 4044 break; 4045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4046 demph_reg_value = 0x2B405555; 4047 uniqtranscale_reg_value = 0x5598DA3A; 4048 break; 4049 default: 4050 return; 4051 } 4052 break; 4053 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4054 preemph_reg_value = 0x0002000; 4055 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4057 demph_reg_value = 0x2B404040; 4058 uniqtranscale_reg_value = 0x5552B83A; 4059 break; 4060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4061 demph_reg_value = 0x2B404848; 4062 uniqtranscale_reg_value = 0x5580B83A; 4063 break; 4064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4065 demph_reg_value = 0x2B404040; 4066 uniqtranscale_reg_value = 0x55ADDA3A; 4067 break; 4068 default: 4069 return; 4070 } 4071 break; 4072 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4073 preemph_reg_value = 0x0000000; 4074 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4075 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4076 demph_reg_value = 0x2B305555; 4077 uniqtranscale_reg_value = 0x5570B83A; 4078 break; 4079 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4080 demph_reg_value = 0x2B2B4040; 4081 uniqtranscale_reg_value = 0x55ADDA3A; 4082 break; 4083 default: 4084 return; 4085 } 4086 break; 4087 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4088 preemph_reg_value = 0x0006000; 4089 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4091 demph_reg_value = 0x1B405555; 4092 uniqtranscale_reg_value = 0x55ADDA3A; 4093 break; 4094 default: 4095 return; 4096 } 4097 break; 4098 default: 4099 return; 4100 } 4101 4102 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, 4103 uniqtranscale_reg_value, 0); 4104 } 4105 4106 static void chv_set_signal_levels(struct intel_dp *intel_dp) 4107 { 4108 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 4109 u32 deemph_reg_value, margin_reg_value; 4110 bool uniq_trans_scale = false; 4111 u8 train_set = intel_dp->train_set[0]; 4112 4113 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4114 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4115 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4116 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4117 deemph_reg_value = 128; 4118 margin_reg_value = 52; 4119 break; 4120 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4121 deemph_reg_value = 128; 4122 margin_reg_value = 77; 4123 break; 4124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4125 deemph_reg_value = 128; 4126 margin_reg_value = 102; 4127 break; 4128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4129 deemph_reg_value = 128; 4130 margin_reg_value = 154; 4131 uniq_trans_scale = true; 4132 break; 4133 default: 4134 return; 4135 } 4136 break; 4137 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4138 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4140 deemph_reg_value = 85; 4141 margin_reg_value = 78; 4142 break; 4143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4144 deemph_reg_value = 85; 4145 margin_reg_value = 116; 4146 break; 4147 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4148 deemph_reg_value = 85; 4149 
margin_reg_value = 154; 4150 break; 4151 default: 4152 return; 4153 } 4154 break; 4155 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4156 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4157 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4158 deemph_reg_value = 64; 4159 margin_reg_value = 104; 4160 break; 4161 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4162 deemph_reg_value = 64; 4163 margin_reg_value = 154; 4164 break; 4165 default: 4166 return; 4167 } 4168 break; 4169 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4170 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4171 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4172 deemph_reg_value = 43; 4173 margin_reg_value = 154; 4174 break; 4175 default: 4176 return; 4177 } 4178 break; 4179 default: 4180 return; 4181 } 4182 4183 chv_set_phy_signal_level(encoder, deemph_reg_value, 4184 margin_reg_value, uniq_trans_scale); 4185 } 4186 4187 static u32 g4x_signal_levels(u8 train_set) 4188 { 4189 u32 signal_levels = 0; 4190 4191 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 4192 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 4193 default: 4194 signal_levels |= DP_VOLTAGE_0_4; 4195 break; 4196 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: 4197 signal_levels |= DP_VOLTAGE_0_6; 4198 break; 4199 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: 4200 signal_levels |= DP_VOLTAGE_0_8; 4201 break; 4202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 4203 signal_levels |= DP_VOLTAGE_1_2; 4204 break; 4205 } 4206 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 4207 case DP_TRAIN_PRE_EMPH_LEVEL_0: 4208 default: 4209 signal_levels |= DP_PRE_EMPHASIS_0; 4210 break; 4211 case DP_TRAIN_PRE_EMPH_LEVEL_1: 4212 signal_levels |= DP_PRE_EMPHASIS_3_5; 4213 break; 4214 case DP_TRAIN_PRE_EMPH_LEVEL_2: 4215 signal_levels |= DP_PRE_EMPHASIS_6; 4216 break; 4217 case DP_TRAIN_PRE_EMPH_LEVEL_3: 4218 signal_levels |= DP_PRE_EMPHASIS_9_5; 4219 break; 4220 } 4221 return signal_levels; 4222 } 4223 4224 static void 4225 g4x_set_signal_levels(struct intel_dp *intel_dp) 4226 { 4227 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4228 u8 train_set = intel_dp->train_set[0]; 4229 u32 signal_levels; 4230 4231 signal_levels = g4x_signal_levels(train_set); 4232 4233 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4234 signal_levels); 4235 4236 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK); 4237 intel_dp->DP |= signal_levels; 4238 4239 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4240 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4241 } 4242 4243 /* SNB CPU eDP voltage swing and pre-emphasis control */ 4244 static u32 snb_cpu_edp_signal_levels(u8 train_set) 4245 { 4246 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4247 DP_TRAIN_PRE_EMPHASIS_MASK); 4248 4249 switch (signal_levels) { 4250 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4252 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4254 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 4255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4257 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 4258 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4260 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 4261 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4263 return 
EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 4264 default: 4265 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: " 4266 "0x%x\n", signal_levels); 4267 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 4268 } 4269 } 4270 4271 static void 4272 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4273 { 4274 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4275 u8 train_set = intel_dp->train_set[0]; 4276 u32 signal_levels; 4277 4278 signal_levels = snb_cpu_edp_signal_levels(train_set); 4279 4280 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4281 signal_levels); 4282 4283 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 4284 intel_dp->DP |= signal_levels; 4285 4286 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4287 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4288 } 4289 4290 /* IVB CPU eDP voltage swing and pre-emphasis control */ 4291 static u32 ivb_cpu_edp_signal_levels(u8 train_set) 4292 { 4293 u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 4294 DP_TRAIN_PRE_EMPHASIS_MASK); 4295 4296 switch (signal_levels) { 4297 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4298 return EDP_LINK_TRAIN_400MV_0DB_IVB; 4299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4300 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 4301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: 4303 return EDP_LINK_TRAIN_400MV_6DB_IVB; 4304 4305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4306 return EDP_LINK_TRAIN_600MV_0DB_IVB; 4307 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4308 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 4309 4310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: 4311 return EDP_LINK_TRAIN_800MV_0DB_IVB; 4312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: 4313 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 4314 4315 default: 4316 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: " 4317 "0x%x\n", signal_levels); 4318 return EDP_LINK_TRAIN_500MV_0DB_IVB; 4319 } 4320 } 4321 4322 static void 4323 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp) 4324 { 4325 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4326 u8 train_set = intel_dp->train_set[0]; 4327 u32 signal_levels; 4328 4329 signal_levels = ivb_cpu_edp_signal_levels(train_set); 4330 4331 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", 4332 signal_levels); 4333 4334 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 4335 intel_dp->DP |= signal_levels; 4336 4337 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 4338 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4339 } 4340 4341 void intel_dp_set_signal_levels(struct intel_dp *intel_dp) 4342 { 4343 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4344 u8 train_set = intel_dp->train_set[0]; 4345 4346 drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", 4347 train_set & DP_TRAIN_VOLTAGE_SWING_MASK, 4348 train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); 4349 drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", 4350 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> 4351 DP_TRAIN_PRE_EMPHASIS_SHIFT, 4352 train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
4353 " (max)" : ""); 4354 4355 intel_dp->set_signal_levels(intel_dp); 4356 } 4357 4358 void 4359 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 4360 u8 dp_train_pat) 4361 { 4362 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4363 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); 4364 4365 if (dp_train_pat & train_pat_mask) 4366 drm_dbg_kms(&dev_priv->drm, 4367 "Using DP training pattern TPS%d\n", 4368 dp_train_pat & train_pat_mask); 4369 4370 intel_dp->set_link_train(intel_dp, dp_train_pat); 4371 } 4372 4373 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 4374 { 4375 if (intel_dp->set_idle_link_train) 4376 intel_dp->set_idle_link_train(intel_dp); 4377 } 4378 4379 static void 4380 intel_dp_link_down(struct intel_encoder *encoder, 4381 const struct intel_crtc_state *old_crtc_state) 4382 { 4383 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4384 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4385 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 4386 enum port port = encoder->port; 4387 u32 DP = intel_dp->DP; 4388 4389 if (drm_WARN_ON(&dev_priv->drm, 4390 (intel_de_read(dev_priv, intel_dp->output_reg) & 4391 DP_PORT_EN) == 0)) 4392 return; 4393 4394 drm_dbg_kms(&dev_priv->drm, "\n"); 4395 4396 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 4397 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { 4398 DP &= ~DP_LINK_TRAIN_MASK_CPT; 4399 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; 4400 } else { 4401 DP &= ~DP_LINK_TRAIN_MASK; 4402 DP |= DP_LINK_TRAIN_PAT_IDLE; 4403 } 4404 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4405 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4406 4407 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 4408 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4409 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4410 4411 /* 4412 * HW workaround for IBX, we need to move the port 4413 * to transcoder A after disabling it to allow the 4414 * matching HDMI port to be enabled on transcoder A. 4415 */ 4416 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { 4417 /* 4418 * We get CPU/PCH FIFO underruns on the other pipe when 4419 * doing the workaround. Sweep them under the rug. 
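 *
 * Roughly, the workaround below is:
 *   1. silence CPU/PCH FIFO underrun reporting on pipe A
 *   2. re-enable the port on pipe A with training pattern 1
 *   3. disable the port again
 *   4. wait for a vblank on pipe A and restore underrun reporting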
4420 */ 4421 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4422 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); 4423 4424 /* always enable with pattern 1 (as per spec) */ 4425 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); 4426 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | 4427 DP_LINK_TRAIN_PAT_1; 4428 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4429 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4430 4431 DP &= ~DP_PORT_EN; 4432 intel_de_write(dev_priv, intel_dp->output_reg, DP); 4433 intel_de_posting_read(dev_priv, intel_dp->output_reg); 4434 4435 intel_wait_for_vblank_if_active(dev_priv, PIPE_A); 4436 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4437 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 4438 } 4439 4440 msleep(intel_dp->panel_power_down_delay); 4441 4442 intel_dp->DP = DP; 4443 4444 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4445 intel_wakeref_t wakeref; 4446 4447 with_pps_lock(intel_dp, wakeref) 4448 intel_dp->active_pipe = INVALID_PIPE; 4449 } 4450 } 4451 4452 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 4453 { 4454 u8 dprx = 0; 4455 4456 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 4457 &dprx) != 1) 4458 return false; 4459 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 4460 } 4461 4462 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 4463 { 4464 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4465 4466 /* 4467 * Clear the cached register set to avoid using stale values 4468 * for the sinks that do not support DSC. 4469 */ 4470 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4471 4472 /* Clear fec_capable to avoid using stale values */ 4473 intel_dp->fec_capable = 0; 4474 4475 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 4476 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 4477 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4478 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 4479 intel_dp->dsc_dpcd, 4480 sizeof(intel_dp->dsc_dpcd)) < 0) 4481 drm_err(&i915->drm, 4482 "Failed to read DPCD register 0x%x\n", 4483 DP_DSC_SUPPORT); 4484 4485 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 4486 (int)sizeof(intel_dp->dsc_dpcd), 4487 intel_dp->dsc_dpcd); 4488 4489 /* FEC is supported only on DP 1.4 */ 4490 if (!intel_dp_is_edp(intel_dp) && 4491 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 4492 &intel_dp->fec_capable) < 0) 4493 drm_err(&i915->drm, 4494 "Failed to read FEC DPCD register\n"); 4495 4496 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 4497 intel_dp->fec_capable); 4498 } 4499 } 4500 4501 static bool 4502 intel_edp_init_dpcd(struct intel_dp *intel_dp) 4503 { 4504 struct drm_i915_private *dev_priv = 4505 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 4506 4507 /* this function is meant to be called only once */ 4508 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 4509 4510 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 4511 return false; 4512 4513 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4514 drm_dp_is_branch(intel_dp->dpcd)); 4515 4516 /* 4517 * Read the eDP display control registers. 4518 * 4519 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 4520 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 4521 * set, but require eDP 1.4+ detection (e.g. for supported link rates 4522 * method). The display control registers should read zero if they're 4523 * not supported anyway. 
4524 */ 4525 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 4526 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 4527 sizeof(intel_dp->edp_dpcd)) 4528 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 4529 (int)sizeof(intel_dp->edp_dpcd), 4530 intel_dp->edp_dpcd); 4531 4532 /* 4533 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 4534 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 4535 */ 4536 intel_psr_init_dpcd(intel_dp); 4537 4538 /* Read the eDP 1.4+ supported link rates. */ 4539 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 4540 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 4541 int i; 4542 4543 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 4544 sink_rates, sizeof(sink_rates)); 4545 4546 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 4547 int val = le16_to_cpu(sink_rates[i]); 4548 4549 if (val == 0) 4550 break; 4551 4552 /* Value read multiplied by 200kHz gives the per-lane 4553 * link rate in kHz. The source rates are, however, 4554 * stored in terms of LS_Clk kHz. The full conversion 4555 * back to symbols is 4556 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 4557 */ 4558 intel_dp->sink_rates[i] = (val * 200) / 10; 4559 } 4560 intel_dp->num_sink_rates = i; 4561 } 4562 4563 /* 4564 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 4565 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 4566 */ 4567 if (intel_dp->num_sink_rates) 4568 intel_dp->use_rate_select = true; 4569 else 4570 intel_dp_set_sink_rates(intel_dp); 4571 4572 intel_dp_set_common_rates(intel_dp); 4573 4574 /* Read the eDP DSC DPCD registers */ 4575 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 4576 intel_dp_get_dsc_sink_cap(intel_dp); 4577 4578 return true; 4579 } 4580 4581 static bool 4582 intel_dp_has_sink_count(struct intel_dp *intel_dp) 4583 { 4584 if (!intel_dp->attached_connector) 4585 return false; 4586 4587 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 4588 intel_dp->dpcd, 4589 &intel_dp->desc); 4590 } 4591 4592 static bool 4593 intel_dp_get_dpcd(struct intel_dp *intel_dp) 4594 { 4595 int ret; 4596 4597 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) 4598 return false; 4599 4600 /* 4601 * Don't clobber cached eDP rates. Also skip re-reading 4602 * the OUI/ID since we know it won't change. 4603 */ 4604 if (!intel_dp_is_edp(intel_dp)) { 4605 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4606 drm_dp_is_branch(intel_dp->dpcd)); 4607 4608 intel_dp_set_sink_rates(intel_dp); 4609 intel_dp_set_common_rates(intel_dp); 4610 } 4611 4612 if (intel_dp_has_sink_count(intel_dp)) { 4613 ret = drm_dp_read_sink_count(&intel_dp->aux); 4614 if (ret < 0) 4615 return false; 4616 4617 /* 4618 * Sink count can change between short pulse hpd hence 4619 * a member variable in intel_dp will track any changes 4620 * between short pulse interrupts. 4621 */ 4622 intel_dp->sink_count = ret; 4623 4624 /* 4625 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 4626 * a dongle is present but no display. Unless we require to know 4627 * if a dongle is present or not, we don't need to update 4628 * downstream port information. So, an early return here saves 4629 * time from performing other operations which are not required. 
4630 */ 4631 if (!intel_dp->sink_count) 4632 return false; 4633 } 4634 4635 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 4636 intel_dp->downstream_ports) == 0; 4637 } 4638 4639 static bool 4640 intel_dp_can_mst(struct intel_dp *intel_dp) 4641 { 4642 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4643 4644 return i915->params.enable_dp_mst && 4645 intel_dp->can_mst && 4646 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 4647 } 4648 4649 static void 4650 intel_dp_configure_mst(struct intel_dp *intel_dp) 4651 { 4652 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4653 struct intel_encoder *encoder = 4654 &dp_to_dig_port(intel_dp)->base; 4655 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 4656 4657 drm_dbg_kms(&i915->drm, 4658 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 4659 encoder->base.base.id, encoder->base.name, 4660 yesno(intel_dp->can_mst), yesno(sink_can_mst), 4661 yesno(i915->params.enable_dp_mst)); 4662 4663 if (!intel_dp->can_mst) 4664 return; 4665 4666 intel_dp->is_mst = sink_can_mst && 4667 i915->params.enable_dp_mst; 4668 4669 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4670 intel_dp->is_mst); 4671 } 4672 4673 static bool 4674 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 4675 { 4676 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4677 sink_irq_vector, DP_DPRX_ESI_LEN) == 4678 DP_DPRX_ESI_LEN; 4679 } 4680 4681 bool 4682 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 4683 const struct drm_connector_state *conn_state) 4684 { 4685 /* 4686 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 4687 * of Color Encoding Format and Content Color Gamut], in order to 4688 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 4689 */ 4690 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 4691 return true; 4692 4693 switch (conn_state->colorspace) { 4694 case DRM_MODE_COLORIMETRY_SYCC_601: 4695 case DRM_MODE_COLORIMETRY_OPYCC_601: 4696 case DRM_MODE_COLORIMETRY_BT2020_YCC: 4697 case DRM_MODE_COLORIMETRY_BT2020_RGB: 4698 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 4699 return true; 4700 default: 4701 break; 4702 } 4703 4704 return false; 4705 } 4706 4707 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 4708 struct dp_sdp *sdp, size_t size) 4709 { 4710 size_t length = sizeof(struct dp_sdp); 4711 4712 if (size < length) 4713 return -ENOSPC; 4714 4715 memset(sdp, 0, size); 4716 4717 /* 4718 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 4719 * VSC SDP Header Bytes 4720 */ 4721 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 4722 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 4723 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 4724 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 4725 4726 /* 4727 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 4728 * per DP 1.4a spec. 
4729 */ 4730 if (vsc->revision != 0x5) 4731 goto out; 4732 4733 /* VSC SDP Payload for DB16 through DB18 */ 4734 /* Pixel Encoding and Colorimetry Formats */ 4735 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 4736 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 4737 4738 switch (vsc->bpc) { 4739 case 6: 4740 /* 6bpc: 0x0 */ 4741 break; 4742 case 8: 4743 sdp->db[17] = 0x1; /* DB17[3:0] */ 4744 break; 4745 case 10: 4746 sdp->db[17] = 0x2; 4747 break; 4748 case 12: 4749 sdp->db[17] = 0x3; 4750 break; 4751 case 16: 4752 sdp->db[17] = 0x4; 4753 break; 4754 default: 4755 MISSING_CASE(vsc->bpc); 4756 break; 4757 } 4758 /* Dynamic Range and Component Bit Depth */ 4759 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 4760 sdp->db[17] |= 0x80; /* DB17[7] */ 4761 4762 /* Content Type */ 4763 sdp->db[18] = vsc->content_type & 0x7; 4764 4765 out: 4766 return length; 4767 } 4768 4769 static ssize_t 4770 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 4771 struct dp_sdp *sdp, 4772 size_t size) 4773 { 4774 size_t length = sizeof(struct dp_sdp); 4775 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 4776 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 4777 ssize_t len; 4778 4779 if (size < length) 4780 return -ENOSPC; 4781 4782 memset(sdp, 0, size); 4783 4784 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 4785 if (len < 0) { 4786 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 4787 return -ENOSPC; 4788 } 4789 4790 if (len != infoframe_size) { 4791 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 4792 return -ENOSPC; 4793 } 4794 4795 /* 4796 * Set up the infoframe sdp packet for HDR static metadata. 4797 * Prepare VSC Header for SU as per DP 1.4a spec, 4798 * Table 2-100 and Table 2-101 4799 */ 4800 4801 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 4802 sdp->sdp_header.HB0 = 0; 4803 /* 4804 * Packet Type 80h + Non-audio INFOFRAME Type value 4805 * HDMI_INFOFRAME_TYPE_DRM: 0x87 4806 * - 80h + Non-audio INFOFRAME Type value 4807 * - InfoFrame Type: 0x07 4808 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 4809 */ 4810 sdp->sdp_header.HB1 = drm_infoframe->type; 4811 /* 4812 * Least Significant Eight Bits of (Data Byte Count – 1) 4813 * infoframe_size - 1 4814 */ 4815 sdp->sdp_header.HB2 = 0x1D; 4816 /* INFOFRAME SDP Version Number */ 4817 sdp->sdp_header.HB3 = (0x13 << 2); 4818 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 4819 sdp->db[0] = drm_infoframe->version; 4820 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 4821 sdp->db[1] = drm_infoframe->length; 4822 /* 4823 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 4824 * HDMI_INFOFRAME_HEADER_SIZE 4825 */ 4826 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 4827 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 4828 HDMI_DRM_INFOFRAME_SIZE); 4829 4830 /* 4831 * Size of DP infoframe sdp packet for HDR static metadata consists of 4832 * - DP SDP Header(struct dp_sdp_header): 4 bytes 4833 * - Two Data Blocks: 2 bytes 4834 * CTA Header Byte2 (INFOFRAME Version Number) 4835 * CTA Header Byte3 (Length of INFOFRAME) 4836 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 4837 * 4838 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 4839 * infoframe size. But GEN11+ has larger than that size, write_infoframe 4840 * will pad rest of the size. 
4841 */ 4842 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 4843 } 4844 4845 static void intel_write_dp_sdp(struct intel_encoder *encoder, 4846 const struct intel_crtc_state *crtc_state, 4847 unsigned int type) 4848 { 4849 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4850 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4851 struct dp_sdp sdp = {}; 4852 ssize_t len; 4853 4854 if ((crtc_state->infoframes.enable & 4855 intel_hdmi_infoframe_enable(type)) == 0) 4856 return; 4857 4858 switch (type) { 4859 case DP_SDP_VSC: 4860 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 4861 sizeof(sdp)); 4862 break; 4863 case HDMI_PACKET_TYPE_GAMUT_METADATA: 4864 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 4865 &sdp, sizeof(sdp)); 4866 break; 4867 default: 4868 MISSING_CASE(type); 4869 return; 4870 } 4871 4872 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4873 return; 4874 4875 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 4876 } 4877 4878 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 4879 const struct intel_crtc_state *crtc_state, 4880 struct drm_dp_vsc_sdp *vsc) 4881 { 4882 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4883 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4884 struct dp_sdp sdp = {}; 4885 ssize_t len; 4886 4887 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 4888 4889 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 4890 return; 4891 4892 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 4893 &sdp, len); 4894 } 4895 4896 void intel_dp_set_infoframes(struct intel_encoder *encoder, 4897 bool enable, 4898 const struct intel_crtc_state *crtc_state, 4899 const struct drm_connector_state *conn_state) 4900 { 4901 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4902 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4903 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 4904 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 4905 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 4906 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 4907 u32 val = intel_de_read(dev_priv, reg); 4908 4909 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 4910 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 4911 if (intel_psr_enabled(intel_dp)) 4912 val &= ~dip_enable; 4913 else 4914 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW); 4915 4916 if (!enable) { 4917 intel_de_write(dev_priv, reg, val); 4918 intel_de_posting_read(dev_priv, reg); 4919 return; 4920 } 4921 4922 intel_de_write(dev_priv, reg, val); 4923 intel_de_posting_read(dev_priv, reg); 4924 4925 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 4926 if (!intel_psr_enabled(intel_dp)) 4927 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 4928 4929 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 4930 } 4931 4932 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 4933 const void *buffer, size_t size) 4934 { 4935 const struct dp_sdp *sdp = buffer; 4936 4937 if (size < sizeof(struct dp_sdp)) 4938 return -EINVAL; 4939 4940 memset(vsc, 0, size); 4941 4942 if (sdp->sdp_header.HB0 != 0) 4943 return -EINVAL; 4944 4945 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 4946 return -EINVAL; 4947 4948 vsc->sdp_type = sdp->sdp_header.HB1; 4949 vsc->revision = sdp->sdp_header.HB2; 4950 vsc->length = sdp->sdp_header.HB3; 4951 4952 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 
4953 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 4954 /* 4955 * - HB2 = 0x2, HB3 = 0x8 4956 * VSC SDP supporting 3D stereo + PSR 4957 * - HB2 = 0x4, HB3 = 0xe 4958 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 4959 * first scan line of the SU region (applies to eDP v1.4b 4960 * and higher). 4961 */ 4962 return 0; 4963 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 4964 /* 4965 * - HB2 = 0x5, HB3 = 0x13 4966 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 4967 * Format. 4968 */ 4969 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 4970 vsc->colorimetry = sdp->db[16] & 0xf; 4971 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 4972 4973 switch (sdp->db[17] & 0x7) { 4974 case 0x0: 4975 vsc->bpc = 6; 4976 break; 4977 case 0x1: 4978 vsc->bpc = 8; 4979 break; 4980 case 0x2: 4981 vsc->bpc = 10; 4982 break; 4983 case 0x3: 4984 vsc->bpc = 12; 4985 break; 4986 case 0x4: 4987 vsc->bpc = 16; 4988 break; 4989 default: 4990 MISSING_CASE(sdp->db[17] & 0x7); 4991 return -EINVAL; 4992 } 4993 4994 vsc->content_type = sdp->db[18] & 0x7; 4995 } else { 4996 return -EINVAL; 4997 } 4998 4999 return 0; 5000 } 5001 5002 static int 5003 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 5004 const void *buffer, size_t size) 5005 { 5006 int ret; 5007 5008 const struct dp_sdp *sdp = buffer; 5009 5010 if (size < sizeof(struct dp_sdp)) 5011 return -EINVAL; 5012 5013 if (sdp->sdp_header.HB0 != 0) 5014 return -EINVAL; 5015 5016 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 5017 return -EINVAL; 5018 5019 /* 5020 * Least Significant Eight Bits of (Data Byte Count – 1) 5021 * 1Dh (i.e., Data Byte Count = 30 bytes). 5022 */ 5023 if (sdp->sdp_header.HB2 != 0x1D) 5024 return -EINVAL; 5025 5026 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. 
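 * (The full HB3 layout mirrors the (0x13 << 2) used on the pack side:
 * bits 7:2 carry the INFOFRAME SDP version, 0x13, and bits 1:0 carry
 * the MSBs of (Data Byte Count - 1), which must be zero here.)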
*/ 5027 if ((sdp->sdp_header.HB3 & 0x3) != 0) 5028 return -EINVAL; 5029 5030 /* INFOFRAME SDP Version Number */ 5031 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 5032 return -EINVAL; 5033 5034 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 5035 if (sdp->db[0] != 1) 5036 return -EINVAL; 5037 5038 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 5039 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 5040 return -EINVAL; 5041 5042 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 5043 HDMI_DRM_INFOFRAME_SIZE); 5044 5045 return ret; 5046 } 5047 5048 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 5049 struct intel_crtc_state *crtc_state, 5050 struct drm_dp_vsc_sdp *vsc) 5051 { 5052 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5053 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5054 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5055 unsigned int type = DP_SDP_VSC; 5056 struct dp_sdp sdp = {}; 5057 int ret; 5058 5059 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 5060 if (intel_psr_enabled(intel_dp)) 5061 return; 5062 5063 if ((crtc_state->infoframes.enable & 5064 intel_hdmi_infoframe_enable(type)) == 0) 5065 return; 5066 5067 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 5068 5069 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 5070 5071 if (ret) 5072 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 5073 } 5074 5075 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 5076 struct intel_crtc_state *crtc_state, 5077 struct hdmi_drm_infoframe *drm_infoframe) 5078 { 5079 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 5080 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5081 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 5082 struct dp_sdp sdp = {}; 5083 int ret; 5084 5085 if ((crtc_state->infoframes.enable & 5086 intel_hdmi_infoframe_enable(type)) == 0) 5087 return; 5088 5089 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 5090 sizeof(sdp)); 5091 5092 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 5093 sizeof(sdp)); 5094 5095 if (ret) 5096 drm_dbg_kms(&dev_priv->drm, 5097 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 5098 } 5099 5100 void intel_read_dp_sdp(struct intel_encoder *encoder, 5101 struct intel_crtc_state *crtc_state, 5102 unsigned int type) 5103 { 5104 if (encoder->type != INTEL_OUTPUT_DDI) 5105 return; 5106 5107 switch (type) { 5108 case DP_SDP_VSC: 5109 intel_read_dp_vsc_sdp(encoder, crtc_state, 5110 &crtc_state->infoframes.vsc); 5111 break; 5112 case HDMI_PACKET_TYPE_GAMUT_METADATA: 5113 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 5114 &crtc_state->infoframes.drm.drm); 5115 break; 5116 default: 5117 MISSING_CASE(type); 5118 break; 5119 } 5120 } 5121 5122 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 5123 { 5124 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5125 int status = 0; 5126 int test_link_rate; 5127 u8 test_lane_count, test_link_bw; 5128 /* (DP CTS 1.2) 5129 * 4.3.1.11 5130 */ 5131 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */ 5132 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 5133 &test_lane_count); 5134 5135 if (status <= 0) { 5136 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 5137 return DP_TEST_NAK; 5138 } 5139 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 5140 5141 status = drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_LINK_RATE, 5142 &test_link_bw); 5143 if (status <= 0) { 5144 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 5145 return DP_TEST_NAK; 5146 } 5147 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 5148 5149 /* Validate the requested link rate and lane count */ 5150 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 5151 test_lane_count)) 5152 return DP_TEST_NAK; 5153 5154 intel_dp->compliance.test_lane_count = test_lane_count; 5155 intel_dp->compliance.test_link_rate = test_link_rate; 5156 5157 return DP_TEST_ACK; 5158 } 5159 5160 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 5161 { 5162 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5163 u8 test_pattern; 5164 u8 test_misc; 5165 __be16 h_width, v_height; 5166 int status = 0; 5167 5168 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 5169 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 5170 &test_pattern); 5171 if (status <= 0) { 5172 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 5173 return DP_TEST_NAK; 5174 } 5175 if (test_pattern != DP_COLOR_RAMP) 5176 return DP_TEST_NAK; 5177 5178 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 5179 &h_width, 2); 5180 if (status <= 0) { 5181 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 5182 return DP_TEST_NAK; 5183 } 5184 5185 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 5186 &v_height, 2); 5187 if (status <= 0) { 5188 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 5189 return DP_TEST_NAK; 5190 } 5191 5192 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 5193 &test_misc); 5194 if (status <= 0) { 5195 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 5196 return DP_TEST_NAK; 5197 } 5198 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 5199 return DP_TEST_NAK; 5200 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 5201 return DP_TEST_NAK; 5202 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 5203 case DP_TEST_BIT_DEPTH_6: 5204 intel_dp->compliance.test_data.bpc = 6; 5205 break; 5206 case DP_TEST_BIT_DEPTH_8: 5207 intel_dp->compliance.test_data.bpc = 8; 5208 break; 5209 default: 5210 return DP_TEST_NAK; 5211 } 5212 5213 intel_dp->compliance.test_data.video_pattern = test_pattern; 5214 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 5215 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 5216 /* Set test active flag here so userspace doesn't interrupt things */ 5217 intel_dp->compliance.test_active = true; 5218 5219 return DP_TEST_ACK; 5220 } 5221 5222 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 5223 { 5224 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5225 u8 test_result = DP_TEST_ACK; 5226 struct intel_connector *intel_connector = intel_dp->attached_connector; 5227 struct drm_connector *connector = &intel_connector->base; 5228 5229 if (intel_connector->detect_edid == NULL || 5230 connector->edid_corrupt || 5231 intel_dp->aux.i2c_defer_count > 6) { 5232 /* Check EDID read for NACKs, DEFERs and corruption 5233 * (DP CTS 1.2 Core r1.1) 5234 * 4.2.2.4 : Failed EDID read, I2C_NAK 5235 * 4.2.2.5 : Failed EDID read, I2C_DEFER 5236 * 4.2.2.6 : EDID corruption detected 5237 * Use failsafe mode for all cases 5238 */ 5239 if (intel_dp->aux.i2c_nack_count > 0 || 5240 intel_dp->aux.i2c_defer_count > 0) 5241 drm_dbg_kms(&i915->drm, 5242 "EDID read had %d NACKs, %d DEFERs\n", 5243 intel_dp->aux.i2c_nack_count, 5244 intel_dp->aux.i2c_defer_count); 5245 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 5246 } else { 
struct edid *block = intel_connector->detect_edid; 5248 5249 /* We have to write the checksum 5250 * of the last block read 5251 */ 5252 block += intel_connector->detect_edid->extensions; 5253 5254 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 5255 block->checksum) <= 0) 5256 drm_dbg_kms(&i915->drm, 5257 "Failed to write EDID checksum\n"); 5258 5259 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 5260 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 5261 } 5262 5263 /* Set test active flag here so userspace doesn't interrupt things */ 5264 intel_dp->compliance.test_active = true; 5265 5266 return test_result; 5267 } 5268 5269 static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp) 5270 { 5271 struct drm_dp_phy_test_params *data = 5272 &intel_dp->compliance.test_data.phytest; 5273 5274 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 5275 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 5276 return DP_TEST_NAK; 5277 } 5278 5279 /* 5280 * link_mst is set to false to avoid executing mst related code 5281 * during compliance testing. 5282 */ 5283 intel_dp->link_mst = false; 5284 5285 return DP_TEST_ACK; 5286 } 5287 5288 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) 5289 { 5290 struct drm_i915_private *dev_priv = 5291 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 5292 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5293 struct drm_dp_phy_test_params *data = 5294 &intel_dp->compliance.test_data.phytest; 5295 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5296 enum pipe pipe = crtc->pipe; 5297 u32 pattern_val; 5298 5299 switch (data->phy_pattern) { 5300 case DP_PHY_TEST_PATTERN_NONE: 5301 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 5302 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 5303 break; 5304 case DP_PHY_TEST_PATTERN_D10_2: 5305 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 5306 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5307 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 5308 break; 5309 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 5310 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 5311 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5312 DDI_DP_COMP_CTL_ENABLE | 5313 DDI_DP_COMP_CTL_SCRAMBLED_0); 5314 break; 5315 case DP_PHY_TEST_PATTERN_PRBS7: 5316 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 5317 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5318 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 5319 break; 5320 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 5321 /* 5322 * FIXME: Ideally the pattern should come from DPCD 0x250. Since 5323 * the current firmware of the DPR-100 cannot set it, it is 5324 * hardcoded here for the compliance test for now. 5325 */ 5326 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 5327 pattern_val = 0x3e0f83e0; 5328 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 5329 pattern_val = 0x0f83e0f8; 5330 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 5331 pattern_val = 0x0000f83e; 5332 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 5333 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5334 DDI_DP_COMP_CTL_ENABLE | 5335 DDI_DP_COMP_CTL_CUSTOM80); 5336 break; 5337 case DP_PHY_TEST_PATTERN_CP2520: 5338 /* 5339 * FIXME: Ideally the pattern should come from DPCD 0x24A. Since 5340 * the current firmware of the DPR-100 cannot set it, it is 5341 * hardcoded here for the compliance test for now.
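 *
 * The DDI_DP_COMP_CTL write below selects the HBR2 compliance pattern
 * and ORs in the hardcoded 0xFB in place of the value that would
 * otherwise come from the DPCD.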
5342 */ 5343 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 5344 pattern_val = 0xFB; 5345 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 5346 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 5347 pattern_val); 5348 break; 5349 default: 5350 WARN(1, "Invalid Phy Test Pattern\n"); 5351 } 5352 } 5353 5354 static void 5355 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) 5356 { 5357 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5358 struct drm_device *dev = dig_port->base.base.dev; 5359 struct drm_i915_private *dev_priv = to_i915(dev); 5360 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5361 enum pipe pipe = crtc->pipe; 5362 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5363 5364 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5365 TRANS_DDI_FUNC_CTL(pipe)); 5366 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5367 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5368 5369 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 5370 TGL_TRANS_DDI_PORT_MASK); 5371 trans_conf_value &= ~PIPECONF_ENABLE; 5372 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 5373 5374 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5375 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5376 trans_ddi_func_ctl_value); 5377 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5378 } 5379 5380 static void 5381 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) 5382 { 5383 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5384 struct drm_device *dev = dig_port->base.base.dev; 5385 struct drm_i915_private *dev_priv = to_i915(dev); 5386 enum port port = dig_port->base.port; 5387 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 5388 enum pipe pipe = crtc->pipe; 5389 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 5390 5391 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 5392 TRANS_DDI_FUNC_CTL(pipe)); 5393 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 5394 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 5395 5396 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 5397 TGL_TRANS_DDI_SELECT_PORT(port); 5398 trans_conf_value |= PIPECONF_ENABLE; 5399 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 5400 5401 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 5402 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 5403 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 5404 trans_ddi_func_ctl_value); 5405 } 5406 5407 void intel_dp_process_phy_request(struct intel_dp *intel_dp) 5408 { 5409 struct drm_dp_phy_test_params *data = 5410 &intel_dp->compliance.test_data.phytest; 5411 u8 link_status[DP_LINK_STATUS_SIZE]; 5412 5413 if (!intel_dp_get_link_status(intel_dp, link_status)) { 5414 DRM_DEBUG_KMS("failed to get link status\n"); 5415 return; 5416 } 5417 5418 /* retrieve vswing & pre-emphasis setting */ 5419 intel_dp_get_adjust_train(intel_dp, link_status); 5420 5421 intel_dp_autotest_phy_ddi_disable(intel_dp); 5422 5423 intel_dp_set_signal_levels(intel_dp); 5424 5425 intel_dp_phy_pattern_update(intel_dp); 5426 5427 intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes); 5428 5429 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 5430 link_status[DP_DPCD_REV]); 5431 } 5432 5433 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 5434 { 5435 u8 test_result; 5436 5437 test_result = intel_dp_prepare_phytest(intel_dp); 5438 if (test_result != DP_TEST_ACK) 5439 DRM_ERROR("Phy 
test preparation failed\n"); 5440 5441 intel_dp_process_phy_request(intel_dp); 5442 5443 return test_result; 5444 } 5445 5446 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 5447 { 5448 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5449 u8 response = DP_TEST_NAK; 5450 u8 request = 0; 5451 int status; 5452 5453 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 5454 if (status <= 0) { 5455 drm_dbg_kms(&i915->drm, 5456 "Could not read test request from sink\n"); 5457 goto update_status; 5458 } 5459 5460 switch (request) { 5461 case DP_TEST_LINK_TRAINING: 5462 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 5463 response = intel_dp_autotest_link_training(intel_dp); 5464 break; 5465 case DP_TEST_LINK_VIDEO_PATTERN: 5466 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 5467 response = intel_dp_autotest_video_pattern(intel_dp); 5468 break; 5469 case DP_TEST_LINK_EDID_READ: 5470 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 5471 response = intel_dp_autotest_edid(intel_dp); 5472 break; 5473 case DP_TEST_LINK_PHY_TEST_PATTERN: 5474 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 5475 response = intel_dp_autotest_phy_pattern(intel_dp); 5476 break; 5477 default: 5478 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 5479 request); 5480 break; 5481 } 5482 5483 if (response & DP_TEST_ACK) 5484 intel_dp->compliance.test_type = request; 5485 5486 update_status: 5487 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 5488 if (status <= 0) 5489 drm_dbg_kms(&i915->drm, 5490 "Could not write test response to sink\n"); 5491 } 5492 5493 /** 5494 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 5495 * @intel_dp: Intel DP struct 5496 * 5497 * Read any pending MST interrupts, call MST core to handle these and ack the 5498 * interrupts. Check if the main and AUX link state is ok. 5499 * 5500 * Returns: 5501 * - %true if pending interrupts were serviced (or no interrupts were 5502 * pending) w/o detecting an error condition. 5503 * - %false if an error condition - like AUX failure or a loss of link - is 5504 * detected, which needs servicing from the hotplug work. 
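 *
 * Rough flow for one serviced interrupt: read the ESI block, let
 * drm_dp_mst_hpd_irq() process it, then ack by writing esi[1..3] back
 * to DP_SINK_COUNT_ESI + 1 (the 3-byte ack write is retried up to
 * three times).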
5505 */
5506 static bool
5507 intel_dp_check_mst_status(struct intel_dp *intel_dp)
5508 {
5509 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5510 bool link_ok = true;
5511
5512 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
5513
5514 for (;;) {
5515 u8 esi[DP_DPRX_ESI_LEN] = {};
5516 bool handled;
5517 int retry;
5518
5519 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
5520 drm_dbg_kms(&i915->drm,
5521 "failed to get ESI - device may have failed\n");
5522 link_ok = false;
5523
5524 break;
5525 }
5526
5527 /* check link status - esi[10] = 0x200c */
5528 if (intel_dp->active_mst_links > 0 && link_ok &&
5529 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
5530 drm_dbg_kms(&i915->drm,
5531 "channel EQ not ok, retraining\n");
5532 link_ok = false;
5533 }
5534
5535 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
5536
5537 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
5538 if (!handled)
5539 break;
5540
5541 for (retry = 0; retry < 3; retry++) {
5542 int wret;
5543
5544 wret = drm_dp_dpcd_write(&intel_dp->aux,
5545 DP_SINK_COUNT_ESI+1,
5546 &esi[1], 3);
5547 if (wret == 3)
5548 break;
5549 }
5550 }
5551
5552 return link_ok;
5553 }
5554
5555 static bool
5556 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
5557 {
5558 u8 link_status[DP_LINK_STATUS_SIZE];
5559
5560 if (!intel_dp->link_trained)
5561 return false;
5562
5563 /*
5564 * While PSR source HW is enabled, it is in control of the main link,
5565 * enabling and disabling frame sending on its own. A retrain attempted
5566 * here could fail both because the link may not be on and because it
5567 * could mix training patterns and frame data at the same time.
5568 * Also, when exiting PSR, the HW retrains the link anyway, fixing
5569 * any link status error.
5570 */
5571 if (intel_psr_enabled(intel_dp))
5572 return false;
5573
5574 if (!intel_dp_get_link_status(intel_dp, link_status))
5575 return false;
5576
5577 /*
5578 * Validate the cached values of intel_dp->link_rate and
5579 * intel_dp->lane_count before attempting to retrain.
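 * A stale pair can happen if, say, the sink was swapped while the
 * link was down and the cached rate is one the new sink no longer
 * advertises; retraining with such parameters would be pointless.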
5580 */ 5581 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 5582 intel_dp->lane_count)) 5583 return false; 5584 5585 /* Retrain if Channel EQ or CR not ok */ 5586 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 5587 } 5588 5589 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 5590 const struct drm_connector_state *conn_state) 5591 { 5592 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5593 struct intel_encoder *encoder; 5594 enum pipe pipe; 5595 5596 if (!conn_state->best_encoder) 5597 return false; 5598 5599 /* SST */ 5600 encoder = &dp_to_dig_port(intel_dp)->base; 5601 if (conn_state->best_encoder == &encoder->base) 5602 return true; 5603 5604 /* MST */ 5605 for_each_pipe(i915, pipe) { 5606 encoder = &intel_dp->mst_encoders[pipe]->base; 5607 if (conn_state->best_encoder == &encoder->base) 5608 return true; 5609 } 5610 5611 return false; 5612 } 5613 5614 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 5615 struct drm_modeset_acquire_ctx *ctx, 5616 u32 *crtc_mask) 5617 { 5618 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 5619 struct drm_connector_list_iter conn_iter; 5620 struct intel_connector *connector; 5621 int ret = 0; 5622 5623 *crtc_mask = 0; 5624 5625 if (!intel_dp_needs_link_retrain(intel_dp)) 5626 return 0; 5627 5628 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 5629 for_each_intel_connector_iter(connector, &conn_iter) { 5630 struct drm_connector_state *conn_state = 5631 connector->base.state; 5632 struct intel_crtc_state *crtc_state; 5633 struct intel_crtc *crtc; 5634 5635 if (!intel_dp_has_connector(intel_dp, conn_state)) 5636 continue; 5637 5638 crtc = to_intel_crtc(conn_state->crtc); 5639 if (!crtc) 5640 continue; 5641 5642 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 5643 if (ret) 5644 break; 5645 5646 crtc_state = to_intel_crtc_state(crtc->base.state); 5647 5648 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 5649 5650 if (!crtc_state->hw.active) 5651 continue; 5652 5653 if (conn_state->commit && 5654 !try_wait_for_completion(&conn_state->commit->hw_done)) 5655 continue; 5656 5657 *crtc_mask |= drm_crtc_mask(&crtc->base); 5658 } 5659 drm_connector_list_iter_end(&conn_iter); 5660 5661 if (!intel_dp_needs_link_retrain(intel_dp)) 5662 *crtc_mask = 0; 5663 5664 return ret; 5665 } 5666 5667 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 5668 { 5669 struct intel_connector *connector = intel_dp->attached_connector; 5670 5671 return connector->base.status == connector_status_connected || 5672 intel_dp->is_mst; 5673 } 5674 5675 int intel_dp_retrain_link(struct intel_encoder *encoder, 5676 struct drm_modeset_acquire_ctx *ctx) 5677 { 5678 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5679 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 5680 struct intel_crtc *crtc; 5681 u32 crtc_mask; 5682 int ret; 5683 5684 if (!intel_dp_is_connected(intel_dp)) 5685 return 0; 5686 5687 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 5688 ctx); 5689 if (ret) 5690 return ret; 5691 5692 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 5693 if (ret) 5694 return ret; 5695 5696 if (crtc_mask == 0) 5697 return 0; 5698 5699 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 5700 encoder->base.base.id, encoder->base.name); 5701 5702 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 5703 const struct intel_crtc_state *crtc_state = 5704 to_intel_crtc_state(crtc->base.state); 5705 5706 /* Suppress underruns caused 
by re-training */
5707 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5708 if (crtc_state->has_pch_encoder)
5709 intel_set_pch_fifo_underrun_reporting(dev_priv,
5710 intel_crtc_pch_transcoder(crtc), false);
5711 }
5712
5713 intel_dp_start_link_train(intel_dp);
5714 intel_dp_stop_link_train(intel_dp);
5715
5716 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
5717 const struct intel_crtc_state *crtc_state =
5718 to_intel_crtc_state(crtc->base.state);
5719
5720 /* Keep underrun reporting disabled until things are stable */
5721 intel_wait_for_vblank(dev_priv, crtc->pipe);
5722
5723 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
5724 if (crtc_state->has_pch_encoder)
5725 intel_set_pch_fifo_underrun_reporting(dev_priv,
5726 intel_crtc_pch_transcoder(crtc), true);
5727 }
5728
5729 return 0;
5730 }
5731
5732 /*
5733 * If the display is now connected, check the link status; there have
5734 * been known issues of link loss triggering a
5735 * long pulse.
5736 *
5737 * Some sinks (e.g. the ASUS PB287Q) seem to perform some
5738 * weird HPD ping pong during modesets. So we can apparently
5739 * end up with HPD going low during a modeset, and then
5740 * going back up soon after. And once that happens we must
5741 * retrain the link to get a picture. That covers the case where no
5742 * userspace component reacted to the intermittent HPD dip.
5743 */
5744 static enum intel_hotplug_state
5745 intel_dp_hotplug(struct intel_encoder *encoder,
5746 struct intel_connector *connector)
5747 {
5748 struct drm_modeset_acquire_ctx ctx;
5749 enum intel_hotplug_state state;
5750 int ret;
5751
5752 state = intel_encoder_hotplug(encoder, connector);
5753
5754 drm_modeset_acquire_init(&ctx, 0);
5755
5756 for (;;) {
5757 ret = intel_dp_retrain_link(encoder, &ctx);
5758
5759 if (ret == -EDEADLK) {
5760 drm_modeset_backoff(&ctx);
5761 continue;
5762 }
5763
5764 break;
5765 }
5766
5767 drm_modeset_drop_locks(&ctx);
5768 drm_modeset_acquire_fini(&ctx);
5769 drm_WARN(encoder->base.dev, ret,
5770 "Acquiring modeset locks failed with %i\n", ret);
5771
5772 /*
5773 * Keeping it consistent with intel_ddi_hotplug() and
5774 * intel_hdmi_hotplug().
5775 */
5776 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
5777 state = INTEL_HOTPLUG_RETRY;
5778
5779 return state;
5780 }
5781
5782 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
5783 {
5784 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5785 u8 val;
5786
5787 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
5788 return;
5789
5790 if (drm_dp_dpcd_readb(&intel_dp->aux,
5791 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
5792 return;
5793
5794 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
5795
5796 if (val & DP_AUTOMATED_TEST_REQUEST)
5797 intel_dp_handle_test_request(intel_dp);
5798
5799 if (val & DP_CP_IRQ)
5800 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
5801
5802 if (val & DP_SINK_SPECIFIC_IRQ)
5803 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
5804 }
5805
5806 /*
5807 * According to DP spec
5808 * 5.1.2:
5809 * 1. Read DPCD
5810 * 2. Configure link according to Receiver Capabilities
5811 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
5812 * 4. Check link status on receipt of hot-plug interrupt
5813 *
5814 * intel_dp_short_pulse - handles short pulse interrupts
5815 * when full detection is not required.
5816 * Returns %true if short pulse is handled and full detection
5817 * is NOT required and %false otherwise.
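 * For example, a changed sink count (a monitor removed behind a
 * branch device) or a failed DPCD read cannot be handled here, so
 * %false is returned to trigger the full detection path.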
5818 */
5819 static bool
5820 intel_dp_short_pulse(struct intel_dp *intel_dp)
5821 {
5822 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5823 u8 old_sink_count = intel_dp->sink_count;
5824 bool ret;
5825
5826 /*
5827 * Clear the compliance test variables so that the values for the
5828 * next automated test request can be captured.
5829 */
5830 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5831
5832 /*
5833 * Now read the DPCD to see if it's actually running. If the current
5834 * value of the sink count doesn't match
5835 * the value that was stored earlier, or the DPCD read failed,
5836 * we need to do a full detection.
5837 */
5838 ret = intel_dp_get_dpcd(intel_dp);
5839
5840 if ((old_sink_count != intel_dp->sink_count) || !ret) {
5841 /* No need to proceed if we are going to do full detect */
5842 return false;
5843 }
5844
5845 intel_dp_check_service_irq(intel_dp);
5846
5847 /* Handle CEC interrupts, if any */
5848 drm_dp_cec_irq(&intel_dp->aux);
5849
5850 /* defer to the hotplug work for link retraining if needed */
5851 if (intel_dp_needs_link_retrain(intel_dp))
5852 return false;
5853
5854 intel_psr_short_pulse(intel_dp);
5855
5856 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
5857 drm_dbg_kms(&dev_priv->drm,
5858 "Link Training Compliance Test requested\n");
5859 /* Send a Hotplug Uevent to userspace to start modeset */
5860 drm_kms_helper_hotplug_event(&dev_priv->drm);
5861 }
5862
5863 return true;
5864 }
5865
5866 /* XXX this is probably wrong for multiple downstream ports */
5867 static enum drm_connector_status
5868 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
5869 {
5870 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5871 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
5872 u8 *dpcd = intel_dp->dpcd;
5873 u8 type;
5874
5875 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
5876 return connector_status_connected;
5877
5878 if (lspcon->active)
5879 lspcon_resume(lspcon);
5880
5881 if (!intel_dp_get_dpcd(intel_dp))
5882 return connector_status_disconnected;
5883
5884 /* if there's no downstream port, we're done */
5885 if (!drm_dp_is_branch(dpcd))
5886 return connector_status_connected;
5887
5888 /* If we're HPD-aware, SINK_COUNT changes dynamically */
5889 if (intel_dp_has_sink_count(intel_dp) &&
5890 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5891 return intel_dp->sink_count ?
5892 connector_status_connected : connector_status_disconnected; 5893 } 5894 5895 if (intel_dp_can_mst(intel_dp)) 5896 return connector_status_connected; 5897 5898 /* If no HPD, poke DDC gently */ 5899 if (drm_probe_ddc(&intel_dp->aux.ddc)) 5900 return connector_status_connected; 5901 5902 /* Well we tried, say unknown for unreliable port types */ 5903 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 5904 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 5905 if (type == DP_DS_PORT_TYPE_VGA || 5906 type == DP_DS_PORT_TYPE_NON_EDID) 5907 return connector_status_unknown; 5908 } else { 5909 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 5910 DP_DWN_STRM_PORT_TYPE_MASK; 5911 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 5912 type == DP_DWN_STRM_PORT_TYPE_OTHER) 5913 return connector_status_unknown; 5914 } 5915 5916 /* Anything else is out of spec, warn and ignore */ 5917 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 5918 return connector_status_disconnected; 5919 } 5920 5921 static enum drm_connector_status 5922 edp_detect(struct intel_dp *intel_dp) 5923 { 5924 return connector_status_connected; 5925 } 5926 5927 static bool ibx_digital_port_connected(struct intel_encoder *encoder) 5928 { 5929 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5930 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; 5931 5932 return intel_de_read(dev_priv, SDEISR) & bit; 5933 } 5934 5935 static bool g4x_digital_port_connected(struct intel_encoder *encoder) 5936 { 5937 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5938 u32 bit; 5939 5940 switch (encoder->hpd_pin) { 5941 case HPD_PORT_B: 5942 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 5943 break; 5944 case HPD_PORT_C: 5945 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 5946 break; 5947 case HPD_PORT_D: 5948 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 5949 break; 5950 default: 5951 MISSING_CASE(encoder->hpd_pin); 5952 return false; 5953 } 5954 5955 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 5956 } 5957 5958 static bool gm45_digital_port_connected(struct intel_encoder *encoder) 5959 { 5960 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5961 u32 bit; 5962 5963 switch (encoder->hpd_pin) { 5964 case HPD_PORT_B: 5965 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 5966 break; 5967 case HPD_PORT_C: 5968 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 5969 break; 5970 case HPD_PORT_D: 5971 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 5972 break; 5973 default: 5974 MISSING_CASE(encoder->hpd_pin); 5975 return false; 5976 } 5977 5978 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 5979 } 5980 5981 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 5982 { 5983 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 5984 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; 5985 5986 return intel_de_read(dev_priv, DEISR) & bit; 5987 } 5988 5989 /* 5990 * intel_digital_port_connected - is the specified port connected? 5991 * @encoder: intel_encoder 5992 * 5993 * In cases where there's a connector physically connected but it can't be used 5994 * by our hardware we also return false, since the rest of the driver should 5995 * pretty much treat the port as disconnected. This is relevant for type-C 5996 * (starting on ICL) where there's ownership involved. 5997 * 5998 * Return %true if port is connected, %false otherwise. 
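 *
 * A typical caller pairs this with the DPCD-based probe, roughly:
 *
 *   if (intel_digital_port_connected(encoder))
 *           status = intel_dp_detect_dpcd(intel_dp);
 *   else
 *           status = connector_status_disconnected;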
5999 */ 6000 bool intel_digital_port_connected(struct intel_encoder *encoder) 6001 { 6002 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 6003 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 6004 bool is_connected = false; 6005 intel_wakeref_t wakeref; 6006 6007 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 6008 is_connected = dig_port->connected(encoder); 6009 6010 return is_connected; 6011 } 6012 6013 static struct edid * 6014 intel_dp_get_edid(struct intel_dp *intel_dp) 6015 { 6016 struct intel_connector *intel_connector = intel_dp->attached_connector; 6017 6018 /* use cached edid if we have one */ 6019 if (intel_connector->edid) { 6020 /* invalid edid */ 6021 if (IS_ERR(intel_connector->edid)) 6022 return NULL; 6023 6024 return drm_edid_duplicate(intel_connector->edid); 6025 } else 6026 return drm_get_edid(&intel_connector->base, 6027 &intel_dp->aux.ddc); 6028 } 6029 6030 static void 6031 intel_dp_set_edid(struct intel_dp *intel_dp) 6032 { 6033 struct intel_connector *intel_connector = intel_dp->attached_connector; 6034 struct edid *edid; 6035 6036 intel_dp_unset_edid(intel_dp); 6037 edid = intel_dp_get_edid(intel_dp); 6038 intel_connector->detect_edid = edid; 6039 6040 intel_dp->has_audio = drm_detect_monitor_audio(edid); 6041 drm_dp_cec_set_edid(&intel_dp->aux, edid); 6042 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); 6043 } 6044 6045 static void 6046 intel_dp_unset_edid(struct intel_dp *intel_dp) 6047 { 6048 struct intel_connector *intel_connector = intel_dp->attached_connector; 6049 6050 drm_dp_cec_unset_edid(&intel_dp->aux); 6051 kfree(intel_connector->detect_edid); 6052 intel_connector->detect_edid = NULL; 6053 6054 intel_dp->has_audio = false; 6055 intel_dp->edid_quirks = 0; 6056 } 6057 6058 static int 6059 intel_dp_detect(struct drm_connector *connector, 6060 struct drm_modeset_acquire_ctx *ctx, 6061 bool force) 6062 { 6063 struct drm_i915_private *dev_priv = to_i915(connector->dev); 6064 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6065 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6066 struct intel_encoder *encoder = &dig_port->base; 6067 enum drm_connector_status status; 6068 6069 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6070 connector->base.id, connector->name); 6071 drm_WARN_ON(&dev_priv->drm, 6072 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 6073 6074 /* Can't disconnect eDP */ 6075 if (intel_dp_is_edp(intel_dp)) 6076 status = edp_detect(intel_dp); 6077 else if (intel_digital_port_connected(encoder)) 6078 status = intel_dp_detect_dpcd(intel_dp); 6079 else 6080 status = connector_status_disconnected; 6081 6082 if (status == connector_status_disconnected) { 6083 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 6084 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 6085 6086 if (intel_dp->is_mst) { 6087 drm_dbg_kms(&dev_priv->drm, 6088 "MST device may have disappeared %d vs %d\n", 6089 intel_dp->is_mst, 6090 intel_dp->mst_mgr.mst_state); 6091 intel_dp->is_mst = false; 6092 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 6093 intel_dp->is_mst); 6094 } 6095 6096 goto out; 6097 } 6098 6099 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 6100 if (INTEL_GEN(dev_priv) >= 11) 6101 intel_dp_get_dsc_sink_cap(intel_dp); 6102 6103 intel_dp_configure_mst(intel_dp); 6104 6105 /* 6106 * TODO: Reset link params when switching to MST mode, until MST 6107 * supports link training fallback params. 
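 * Until then the reset below simply starts over from the common
 * maxima re-read from the sink.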
6108 */ 6109 if (intel_dp->reset_link_params || intel_dp->is_mst) { 6110 /* Initial max link lane count */ 6111 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 6112 6113 /* Initial max link rate */ 6114 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 6115 6116 intel_dp->reset_link_params = false; 6117 } 6118 6119 intel_dp_print_rates(intel_dp); 6120 6121 if (intel_dp->is_mst) { 6122 /* 6123 * If we are in MST mode then this connector 6124 * won't appear connected or have anything 6125 * with EDID on it 6126 */ 6127 status = connector_status_disconnected; 6128 goto out; 6129 } 6130 6131 /* 6132 * Some external monitors do not signal loss of link synchronization 6133 * with an IRQ_HPD, so force a link status check. 6134 */ 6135 if (!intel_dp_is_edp(intel_dp)) { 6136 int ret; 6137 6138 ret = intel_dp_retrain_link(encoder, ctx); 6139 if (ret) 6140 return ret; 6141 } 6142 6143 /* 6144 * Clearing NACK and defer counts to get their exact values 6145 * while reading EDID which are required by Compliance tests 6146 * 4.2.2.4 and 4.2.2.5 6147 */ 6148 intel_dp->aux.i2c_nack_count = 0; 6149 intel_dp->aux.i2c_defer_count = 0; 6150 6151 intel_dp_set_edid(intel_dp); 6152 if (intel_dp_is_edp(intel_dp) || 6153 to_intel_connector(connector)->detect_edid) 6154 status = connector_status_connected; 6155 6156 intel_dp_check_service_irq(intel_dp); 6157 6158 out: 6159 if (status != connector_status_connected && !intel_dp->is_mst) 6160 intel_dp_unset_edid(intel_dp); 6161 6162 /* 6163 * Make sure the refs for power wells enabled during detect are 6164 * dropped to avoid a new detect cycle triggered by HPD polling. 6165 */ 6166 intel_display_power_flush_work(dev_priv); 6167 6168 if (!intel_dp_is_edp(intel_dp)) 6169 drm_dp_set_subconnector_property(connector, 6170 status, 6171 intel_dp->dpcd, 6172 intel_dp->downstream_ports); 6173 return status; 6174 } 6175 6176 static void 6177 intel_dp_force(struct drm_connector *connector) 6178 { 6179 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 6180 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6181 struct intel_encoder *intel_encoder = &dig_port->base; 6182 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 6183 enum intel_display_power_domain aux_domain = 6184 intel_aux_power_domain(dig_port); 6185 intel_wakeref_t wakeref; 6186 6187 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 6188 connector->base.id, connector->name); 6189 intel_dp_unset_edid(intel_dp); 6190 6191 if (connector->status != connector_status_connected) 6192 return; 6193 6194 wakeref = intel_display_power_get(dev_priv, aux_domain); 6195 6196 intel_dp_set_edid(intel_dp); 6197 6198 intel_display_power_put(dev_priv, aux_domain, wakeref); 6199 } 6200 6201 static int intel_dp_get_modes(struct drm_connector *connector) 6202 { 6203 struct intel_connector *intel_connector = to_intel_connector(connector); 6204 struct edid *edid; 6205 6206 edid = intel_connector->detect_edid; 6207 if (edid) { 6208 int ret = intel_connector_update_modes(connector, edid); 6209 if (ret) 6210 return ret; 6211 } 6212 6213 /* if eDP has no EDID, fall back to fixed mode */ 6214 if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) && 6215 intel_connector->panel.fixed_mode) { 6216 struct drm_display_mode *mode; 6217 6218 mode = drm_mode_duplicate(connector->dev, 6219 intel_connector->panel.fixed_mode); 6220 if (mode) { 6221 drm_mode_probed_add(connector, mode); 6222 return 1; 6223 } 6224 } 6225 6226 return 0; 6227 } 
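/*
 * Summary of the mode list precedence implemented above: modes from
 * the cached EDID win; otherwise, for eDP only, the panel's fixed
 * mode is duplicated as the single probed mode; anything else yields
 * no modes.
 */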
6228
6229 static int
6230 intel_dp_connector_register(struct drm_connector *connector)
6231 {
6232 struct drm_i915_private *i915 = to_i915(connector->dev);
6233 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
6234 int ret;
6235
6236 ret = intel_connector_register(connector);
6237 if (ret)
6238 return ret;
6239
6240 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
6241 intel_dp->aux.name, connector->kdev->kobj.name);
6242
6243 intel_dp->aux.dev = connector->kdev;
6244 ret = drm_dp_aux_register(&intel_dp->aux);
6245 if (!ret)
6246 drm_dp_cec_register_connector(&intel_dp->aux, connector);
6247 return ret;
6248 }
6249
6250 static void
6251 intel_dp_connector_unregister(struct drm_connector *connector)
6252 {
6253 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
6254
6255 drm_dp_cec_unregister_connector(&intel_dp->aux);
6256 drm_dp_aux_unregister(&intel_dp->aux);
6257 intel_connector_unregister(connector);
6258 }
6259
6260 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
6261 {
6262 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
6263 struct intel_dp *intel_dp = &dig_port->dp;
6264
6265 intel_dp_mst_encoder_cleanup(dig_port);
6266 if (intel_dp_is_edp(intel_dp)) {
6267 intel_wakeref_t wakeref;
6268
6269 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6270 /*
6271 * vdd might still be enabled due to the delayed vdd off.
6272 * Make sure vdd is actually turned off here.
6273 */
6274 with_pps_lock(intel_dp, wakeref)
6275 edp_panel_vdd_off_sync(intel_dp);
6276
6277 if (intel_dp->edp_notifier.notifier_call) {
6278 unregister_reboot_notifier(&intel_dp->edp_notifier);
6279 intel_dp->edp_notifier.notifier_call = NULL;
6280 }
6281 }
6282
6283 intel_dp_aux_fini(intel_dp);
6284 }
6285
6286 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
6287 {
6288 intel_dp_encoder_flush_work(encoder);
6289
6290 drm_encoder_cleanup(encoder);
6291 kfree(enc_to_dig_port(to_intel_encoder(encoder)));
6292 }
6293
6294 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
6295 {
6296 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
6297 intel_wakeref_t wakeref;
6298
6299 if (!intel_dp_is_edp(intel_dp))
6300 return;
6301
6302 /*
6303 * vdd might still be enabled due to the delayed vdd off.
6304 * Make sure vdd is actually turned off here.
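 * (The delayed vdd-off work is cancelled first, then vdd is dropped
 * synchronously under the pps lock.)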
6305 */
6306 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6307 with_pps_lock(intel_dp, wakeref)
6308 edp_panel_vdd_off_sync(intel_dp);
6309 }
6310
6311 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
6312 {
6313 long ret;
6314
6315 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
6316 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
6317 msecs_to_jiffies(timeout));
6318
6319 if (!ret)
6320 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
6321 }
6322
6323 static
6324 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
6325 u8 *an)
6326 {
6327 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6328 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
6329 static const struct drm_dp_aux_msg msg = {
6330 .request = DP_AUX_NATIVE_WRITE,
6331 .address = DP_AUX_HDCP_AKSV,
6332 .size = DRM_HDCP_KSV_LEN,
6333 };
6334 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
6335 ssize_t dpcd_ret;
6336 int ret;
6337
6338 /* Output An first, that's easy */
6339 dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
6340 an, DRM_HDCP_AN_LEN);
6341 if (dpcd_ret != DRM_HDCP_AN_LEN) {
6342 drm_dbg_kms(&i915->drm,
6343 "Failed to write An over DP/AUX (%zd)\n",
6344 dpcd_ret);
6345 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
6346 }
6347
6348 /*
6349 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
6350 * order to get it on the wire, we need to create the AUX header as if
6351 * we were writing the data, and then tickle the hardware to output the
6352 * data once the header is sent out.
6353 */
6354 intel_dp_aux_header(txbuf, &msg);
6355
6356 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
6357 rxbuf, sizeof(rxbuf),
6358 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
6359 if (ret < 0) {
6360 drm_dbg_kms(&i915->drm,
6361 "Write Aksv over DP/AUX failed (%d)\n", ret);
6362 return ret;
6363 } else if (ret == 0) {
6364 drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
6365 return -EIO;
6366 }
6367
6368 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
6369 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
6370 drm_dbg_kms(&i915->drm,
6371 "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
6372 reply);
6373 return -EIO;
6374 }
6375 return 0;
6376 }
6377
6378 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
6379 u8 *bksv)
6380 {
6381 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6382 ssize_t ret;
6383
6384 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
6385 DRM_HDCP_KSV_LEN);
6386 if (ret != DRM_HDCP_KSV_LEN) {
6387 drm_dbg_kms(&i915->drm,
6388 "Read Bksv from DP/AUX failed (%zd)\n", ret);
6389 return ret >= 0 ? -EIO : ret;
6390 }
6391 return 0;
6392 }
6393
6394 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
6395 u8 *bstatus)
6396 {
6397 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6398 ssize_t ret;
6399
6400 /*
6401 * For some reason the HDMI and DP HDCP specs call this register
6402 * definition by different names. In the HDMI spec, it's called BSTATUS,
6403 * but in DP it's called BINFO.
6404 */
6405 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
6406 bstatus, DRM_HDCP_BSTATUS_LEN);
6407 if (ret != DRM_HDCP_BSTATUS_LEN) {
6408 drm_dbg_kms(&i915->drm,
6409 "Read bstatus from DP/AUX failed (%zd)\n", ret);
6410 return ret >= 0 ?
-EIO : ret; 6411 } 6412 return 0; 6413 } 6414 6415 static 6416 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, 6417 u8 *bcaps) 6418 { 6419 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6420 ssize_t ret; 6421 6422 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, 6423 bcaps, 1); 6424 if (ret != 1) { 6425 drm_dbg_kms(&i915->drm, 6426 "Read bcaps from DP/AUX failed (%zd)\n", ret); 6427 return ret >= 0 ? -EIO : ret; 6428 } 6429 6430 return 0; 6431 } 6432 6433 static 6434 int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, 6435 bool *repeater_present) 6436 { 6437 ssize_t ret; 6438 u8 bcaps; 6439 6440 ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); 6441 if (ret) 6442 return ret; 6443 6444 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; 6445 return 0; 6446 } 6447 6448 static 6449 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, 6450 u8 *ri_prime) 6451 { 6452 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6453 ssize_t ret; 6454 6455 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, 6456 ri_prime, DRM_HDCP_RI_LEN); 6457 if (ret != DRM_HDCP_RI_LEN) { 6458 drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", 6459 ret); 6460 return ret >= 0 ? -EIO : ret; 6461 } 6462 return 0; 6463 } 6464 6465 static 6466 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, 6467 bool *ksv_ready) 6468 { 6469 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6470 ssize_t ret; 6471 u8 bstatus; 6472 6473 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6474 &bstatus, 1); 6475 if (ret != 1) { 6476 drm_dbg_kms(&i915->drm, 6477 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6478 return ret >= 0 ? -EIO : ret; 6479 } 6480 *ksv_ready = bstatus & DP_BSTATUS_READY; 6481 return 0; 6482 } 6483 6484 static 6485 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, 6486 int num_downstream, u8 *ksv_fifo) 6487 { 6488 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6489 ssize_t ret; 6490 int i; 6491 6492 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ 6493 for (i = 0; i < num_downstream; i += 3) { 6494 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; 6495 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6496 DP_AUX_HDCP_KSV_FIFO, 6497 ksv_fifo + i * DRM_HDCP_KSV_LEN, 6498 len); 6499 if (ret != len) { 6500 drm_dbg_kms(&i915->drm, 6501 "Read ksv[%d] from DP/AUX failed (%zd)\n", 6502 i, ret); 6503 return ret >= 0 ? -EIO : ret; 6504 } 6505 } 6506 return 0; 6507 } 6508 6509 static 6510 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, 6511 int i, u32 *part) 6512 { 6513 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6514 ssize_t ret; 6515 6516 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) 6517 return -EINVAL; 6518 6519 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6520 DP_AUX_HDCP_V_PRIME(i), part, 6521 DRM_HDCP_V_PRIME_PART_LEN); 6522 if (ret != DRM_HDCP_V_PRIME_PART_LEN) { 6523 drm_dbg_kms(&i915->drm, 6524 "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); 6525 return ret >= 0 ? 
-EIO : ret; 6526 } 6527 return 0; 6528 } 6529 6530 static 6531 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, 6532 bool enable) 6533 { 6534 /* Not used for single stream DisplayPort setups */ 6535 return 0; 6536 } 6537 6538 static 6539 bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port) 6540 { 6541 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6542 ssize_t ret; 6543 u8 bstatus; 6544 6545 ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, 6546 &bstatus, 1); 6547 if (ret != 1) { 6548 drm_dbg_kms(&i915->drm, 6549 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6550 return false; 6551 } 6552 6553 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); 6554 } 6555 6556 static 6557 int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, 6558 bool *hdcp_capable) 6559 { 6560 ssize_t ret; 6561 u8 bcaps; 6562 6563 ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); 6564 if (ret) 6565 return ret; 6566 6567 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; 6568 return 0; 6569 } 6570 6571 struct hdcp2_dp_errata_stream_type { 6572 u8 msg_id; 6573 u8 stream_type; 6574 } __packed; 6575 6576 struct hdcp2_dp_msg_data { 6577 u8 msg_id; 6578 u32 offset; 6579 bool msg_detectable; 6580 u32 timeout; 6581 u32 timeout2; /* Added for non_paired situation */ 6582 }; 6583 6584 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 6585 { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 }, 6586 { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, 6587 false, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, 6588 { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, 6589 false, 0, 0 }, 6590 { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, 6591 false, 0, 0 }, 6592 { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, 6593 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, 6594 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, 6595 { HDCP_2_2_AKE_SEND_PAIRING_INFO, 6596 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, 6597 HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, 6598 { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 }, 6599 { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, 6600 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 }, 6601 { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, 6602 0, 0 }, 6603 { HDCP_2_2_REP_SEND_RECVID_LIST, 6604 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, 6605 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, 6606 { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, 6607 0, 0 }, 6608 { HDCP_2_2_REP_STREAM_MANAGE, 6609 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, 6610 0, 0 }, 6611 { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, 6612 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, 6613 /* local define to shovel this through the write_2_2 interface */ 6614 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 6615 { HDCP_2_2_ERRATA_DP_STREAM_TYPE, 6616 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, 6617 0, 0 }, 6618 }; 6619 6620 static int 6621 intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, 6622 u8 *rx_status) 6623 { 6624 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 6625 ssize_t ret; 6626 6627 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6628 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, 6629 HDCP_2_2_DP_RXSTATUS_LEN); 6630 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { 6631 drm_dbg_kms(&i915->drm, 6632 "Read bstatus from DP/AUX failed (%zd)\n", ret); 6633 return ret >= 0 ? 
-EIO : ret;
6634 }
6635
6636 return 0;
6637 }
6638
6639 static
6640 int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
6641 u8 msg_id, bool *msg_ready)
6642 {
6643 u8 rx_status;
6644 int ret;
6645
6646 *msg_ready = false;
6647 ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
6648 if (ret < 0)
6649 return ret;
6650
6651 switch (msg_id) {
6652 case HDCP_2_2_AKE_SEND_HPRIME:
6653 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
6654 *msg_ready = true;
6655 break;
6656 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
6657 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
6658 *msg_ready = true;
6659 break;
6660 case HDCP_2_2_REP_SEND_RECVID_LIST:
6661 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6662 *msg_ready = true;
6663 break;
6664 default:
6665 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
6666 return -EINVAL;
6667 }
6668
6669 return 0;
6670 }
6671
6672 static ssize_t
6673 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
6674 const struct hdcp2_dp_msg_data *hdcp2_msg_data)
6675 {
6676 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6677 struct intel_dp *dp = &dig_port->dp;
6678 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6679 u8 msg_id = hdcp2_msg_data->msg_id;
6680 int ret, timeout;
6681 bool msg_ready = false;
6682
6683 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
6684 timeout = hdcp2_msg_data->timeout2;
6685 else
6686 timeout = hdcp2_msg_data->timeout;
6687
6688 /*
6689 * There is no way to detect the CERT, LPRIME and STREAM_READY
6690 * availability, so wait for the timeout and then read the msg.
6691 */
6692 if (!hdcp2_msg_data->msg_detectable) {
6693 mdelay(timeout);
6694 ret = 0;
6695 } else {
6696 /*
6697 * Since we want to check the msg availability at the timeout,
6698 * ignore the timeout of the CP_IRQ wait itself.
6699 */
6700 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6701 ret = hdcp2_detect_msg_availability(dig_port,
6702 msg_id, &msg_ready);
6703 if (!msg_ready)
6704 ret = -ETIMEDOUT;
6705 }
6706
6707 if (ret)
6708 drm_dbg_kms(&i915->drm,
6709 "msg_id %d, ret %d, timeout(mSec): %d\n",
6710 hdcp2_msg_data->msg_id, ret, timeout);
6711
6712 return ret;
6713 }
6714
6715 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6716 {
6717 int i;
6718
6719 for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
6720 if (hdcp2_dp_msg_data[i].msg_id == msg_id)
6721 return &hdcp2_dp_msg_data[i];
6722
6723 return NULL;
6724 }
6725
6726 static
6727 int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
6728 void *buf, size_t size)
6729 {
6730 struct intel_dp *dp = &dig_port->dp;
6731 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6732 unsigned int offset;
6733 u8 *byte = buf;
6734 ssize_t ret, bytes_to_write, len;
6735 const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6736
6737 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6738 if (!hdcp2_msg_data)
6739 return -EINVAL;
6740
6741 offset = hdcp2_msg_data->offset;
6742
6743 /* No msg_id in DP HDCP2.2 msgs */
6744 bytes_to_write = size - 1;
6745 byte++;
6746
6747 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6748
6749 while (bytes_to_write) {
6750 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6751 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6752
6753 ret = drm_dp_dpcd_write(&dig_port->dp.aux,
6754 offset, (void *)byte, len);
6755 if (ret < 0)
6756 return ret;
6757
6758 bytes_to_write -= ret;
6759 byte += ret;
6760 offset += ret;
6761 }
6762
6763 return size;
6764 }
6765
6766 static
6767 ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
6768 {
6769 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6770 u32 dev_cnt;
6771 ssize_t ret;
6772
6773 ret = drm_dp_dpcd_read(&dig_port->dp.aux,
6774 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6775 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6776 if (ret != HDCP_2_2_RXINFO_LEN)
6777 return ret >= 0 ? -EIO : ret;
6778
6779 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6780 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6781
6782 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6783 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6784
6785 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6786 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6787 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6788
6789 return ret;
6790 }
6791
6792 static
6793 int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
6794 u8 msg_id, void *buf, size_t size)
6795 {
6796 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6797 unsigned int offset;
6798 u8 *byte = buf;
6799 ssize_t ret, bytes_to_recv, len;
6800 const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6801
6802 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6803 if (!hdcp2_msg_data)
6804 return -EINVAL;
6805 offset = hdcp2_msg_data->offset;
6806
6807 ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
6808 if (ret < 0)
6809 return ret;
6810
6811 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6812 ret = get_receiver_id_list_size(dig_port);
6813 if (ret < 0)
6814 return ret;
6815
6816 size = ret;
6817 }
6818 bytes_to_recv = size - 1;
6819
6820 /* DP adaptation msgs have no msg_id */
6821 byte++;
6822
6823 while (bytes_to_recv) {
6824 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6825 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6826
6827 ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
6828 (void *)byte, len);
6829 if (ret < 0) {
6830 drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
6831 msg_id, ret);
6832 return ret;
6833 }
6834
6835 bytes_to_recv -= ret;
6836 byte += ret;
6837 offset += ret;
6838 }
6839 byte = buf;
6840 *byte = msg_id;
6841
6842 return size;
6843 }
6844
6845 static
6846 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
6847 bool is_repeater, u8 content_type)
6848 {
6849 int ret;
6850 struct hdcp2_dp_errata_stream_type stream_type_msg;
6851
6852 if (is_repeater)
6853 return 0;
6854
6855 /*
6856 * Errata for DP: since the stream type is used for encryption, the
6857 * receiver must be told the stream type so it can decrypt the
6858 * content.
6859 * A repeater is told the stream type as a part of its
6860 * authentication later on.
6861 */
6862 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6863 stream_type_msg.stream_type = content_type;
6864
6865 ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
6866 sizeof(stream_type_msg));
6867
6868 return ret < 0 ?
ret : 0; 6869 6870 } 6871 6872 static 6873 int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port) 6874 { 6875 u8 rx_status; 6876 int ret; 6877 6878 ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); 6879 if (ret) 6880 return ret; 6881 6882 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) 6883 ret = HDCP_REAUTH_REQUEST; 6884 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) 6885 ret = HDCP_LINK_INTEGRITY_FAILURE; 6886 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) 6887 ret = HDCP_TOPOLOGY_CHANGE; 6888 6889 return ret; 6890 } 6891 6892 static 6893 int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, 6894 bool *capable) 6895 { 6896 u8 rx_caps[3]; 6897 int ret; 6898 6899 *capable = false; 6900 ret = drm_dp_dpcd_read(&dig_port->dp.aux, 6901 DP_HDCP_2_2_REG_RX_CAPS_OFFSET, 6902 rx_caps, HDCP_2_2_RXCAPS_LEN); 6903 if (ret != HDCP_2_2_RXCAPS_LEN) 6904 return ret >= 0 ? -EIO : ret; 6905 6906 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && 6907 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) 6908 *capable = true; 6909 6910 return 0; 6911 } 6912 6913 static const struct intel_hdcp_shim intel_dp_hdcp_shim = { 6914 .write_an_aksv = intel_dp_hdcp_write_an_aksv, 6915 .read_bksv = intel_dp_hdcp_read_bksv, 6916 .read_bstatus = intel_dp_hdcp_read_bstatus, 6917 .repeater_present = intel_dp_hdcp_repeater_present, 6918 .read_ri_prime = intel_dp_hdcp_read_ri_prime, 6919 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, 6920 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, 6921 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, 6922 .toggle_signalling = intel_dp_hdcp_toggle_signalling, 6923 .check_link = intel_dp_hdcp_check_link, 6924 .hdcp_capable = intel_dp_hdcp_capable, 6925 .write_2_2_msg = intel_dp_hdcp2_write_msg, 6926 .read_2_2_msg = intel_dp_hdcp2_read_msg, 6927 .config_stream_type = intel_dp_hdcp2_config_stream_type, 6928 .check_2_2_link = intel_dp_hdcp2_check_link, 6929 .hdcp_2_2_capable = intel_dp_hdcp2_capable, 6930 .protocol = HDCP_PROTOCOL_DP, 6931 }; 6932 6933 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) 6934 { 6935 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6936 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 6937 6938 lockdep_assert_held(&dev_priv->pps_mutex); 6939 6940 if (!edp_have_panel_vdd(intel_dp)) 6941 return; 6942 6943 /* 6944 * The VDD bit needs a power domain reference, so if the bit is 6945 * already enabled when we boot or resume, grab this reference and 6946 * schedule a vdd off, so we don't hold on to the reference 6947 * indefinitely. 
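 * In other words: take the AUX power domain reference now to match
 * the VDD bit the BIOS left set, then let the scheduled vdd off
 * release it through the normal path.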
6948 */ 6949 drm_dbg_kms(&dev_priv->drm, 6950 "VDD left on by BIOS, adjusting state tracking\n"); 6951 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 6952 6953 edp_panel_vdd_schedule_off(intel_dp); 6954 } 6955 6956 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) 6957 { 6958 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 6959 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 6960 enum pipe pipe; 6961 6962 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, 6963 encoder->port, &pipe)) 6964 return pipe; 6965 6966 return INVALID_PIPE; 6967 } 6968 6969 void intel_dp_encoder_reset(struct drm_encoder *encoder) 6970 { 6971 struct drm_i915_private *dev_priv = to_i915(encoder->dev); 6972 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder)); 6973 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 6974 intel_wakeref_t wakeref; 6975 6976 if (!HAS_DDI(dev_priv)) 6977 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 6978 6979 if (lspcon->active) 6980 lspcon_resume(lspcon); 6981 6982 intel_dp->reset_link_params = true; 6983 6984 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 6985 !intel_dp_is_edp(intel_dp)) 6986 return; 6987 6988 with_pps_lock(intel_dp, wakeref) { 6989 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6990 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 6991 6992 if (intel_dp_is_edp(intel_dp)) { 6993 /* 6994 * Reinit the power sequencer, in case BIOS did 6995 * something nasty with it. 6996 */ 6997 intel_dp_pps_init(intel_dp); 6998 intel_edp_panel_vdd_sanitize(intel_dp); 6999 } 7000 } 7001 } 7002 7003 static int intel_modeset_tile_group(struct intel_atomic_state *state, 7004 int tile_group_id) 7005 { 7006 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7007 struct drm_connector_list_iter conn_iter; 7008 struct drm_connector *connector; 7009 int ret = 0; 7010 7011 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 7012 drm_for_each_connector_iter(connector, &conn_iter) { 7013 struct drm_connector_state *conn_state; 7014 struct intel_crtc_state *crtc_state; 7015 struct intel_crtc *crtc; 7016 7017 if (!connector->has_tile || 7018 connector->tile_group->id != tile_group_id) 7019 continue; 7020 7021 conn_state = drm_atomic_get_connector_state(&state->base, 7022 connector); 7023 if (IS_ERR(conn_state)) { 7024 ret = PTR_ERR(conn_state); 7025 break; 7026 } 7027 7028 crtc = to_intel_crtc(conn_state->crtc); 7029 7030 if (!crtc) 7031 continue; 7032 7033 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7034 crtc_state->uapi.mode_changed = true; 7035 7036 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7037 if (ret) 7038 break; 7039 } 7040 drm_connector_list_iter_end(&conn_iter); 7041 7042 return ret; 7043 } 7044 7045 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 7046 { 7047 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7048 struct intel_crtc *crtc; 7049 7050 if (transcoders == 0) 7051 return 0; 7052 7053 for_each_intel_crtc(&dev_priv->drm, crtc) { 7054 struct intel_crtc_state *crtc_state; 7055 int ret; 7056 7057 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 7058 if (IS_ERR(crtc_state)) 7059 return PTR_ERR(crtc_state); 7060 7061 if (!crtc_state->hw.enable) 7062 continue; 7063 7064 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 7065 continue; 7066 7067 crtc_state->uapi.mode_changed = true; 7068 7069 ret = 
drm_atomic_add_affected_connectors(&state->base, &crtc->base); 7070 if (ret) 7071 return ret; 7072 7073 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 7074 if (ret) 7075 return ret; 7076 7077 transcoders &= ~BIT(crtc_state->cpu_transcoder); 7078 } 7079 7080 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 7081 7082 return 0; 7083 } 7084 7085 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 7086 struct drm_connector *connector) 7087 { 7088 const struct drm_connector_state *old_conn_state = 7089 drm_atomic_get_old_connector_state(&state->base, connector); 7090 const struct intel_crtc_state *old_crtc_state; 7091 struct intel_crtc *crtc; 7092 u8 transcoders; 7093 7094 crtc = to_intel_crtc(old_conn_state->crtc); 7095 if (!crtc) 7096 return 0; 7097 7098 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 7099 7100 if (!old_crtc_state->hw.active) 7101 return 0; 7102 7103 transcoders = old_crtc_state->sync_mode_slaves_mask; 7104 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 7105 transcoders |= BIT(old_crtc_state->master_transcoder); 7106 7107 return intel_modeset_affected_transcoders(state, 7108 transcoders); 7109 } 7110 7111 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 7112 struct drm_atomic_state *_state) 7113 { 7114 struct drm_i915_private *dev_priv = to_i915(conn->dev); 7115 struct intel_atomic_state *state = to_intel_atomic_state(_state); 7116 int ret; 7117 7118 ret = intel_digital_connector_atomic_check(conn, &state->base); 7119 if (ret) 7120 return ret; 7121 7122 /* 7123 * We don't enable port sync on BDW due to missing w/as and 7124 * due to not having adjusted the modeset sequence appropriately. 7125 */ 7126 if (INTEL_GEN(dev_priv) < 9) 7127 return 0; 7128 7129 if (!intel_connector_needs_modeset(state, conn)) 7130 return 0; 7131 7132 if (conn->has_tile) { 7133 ret = intel_modeset_tile_group(state, conn->tile_group->id); 7134 if (ret) 7135 return ret; 7136 } 7137 7138 return intel_modeset_synced_crtcs(state, conn); 7139 } 7140 7141 static const struct drm_connector_funcs intel_dp_connector_funcs = { 7142 .force = intel_dp_force, 7143 .fill_modes = drm_helper_probe_single_connector_modes, 7144 .atomic_get_property = intel_digital_connector_atomic_get_property, 7145 .atomic_set_property = intel_digital_connector_atomic_set_property, 7146 .late_register = intel_dp_connector_register, 7147 .early_unregister = intel_dp_connector_unregister, 7148 .destroy = intel_connector_destroy, 7149 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7150 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 7151 }; 7152 7153 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 7154 .detect_ctx = intel_dp_detect, 7155 .get_modes = intel_dp_get_modes, 7156 .mode_valid = intel_dp_mode_valid, 7157 .atomic_check = intel_dp_connector_atomic_check, 7158 }; 7159 7160 static const struct drm_encoder_funcs intel_dp_enc_funcs = { 7161 .reset = intel_dp_encoder_reset, 7162 .destroy = intel_dp_encoder_destroy, 7163 }; 7164 7165 static bool intel_edp_have_power(struct intel_dp *intel_dp) 7166 { 7167 intel_wakeref_t wakeref; 7168 bool have_power = false; 7169 7170 with_pps_lock(intel_dp, wakeref) { 7171 have_power = edp_have_panel_power(intel_dp) && 7172 edp_have_panel_vdd(intel_dp); 7173 } 7174 7175 return have_power; 7176 } 7177 7178 enum irqreturn 7179 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 7180 { 7181 struct drm_i915_private *i915 = 
to_i915(dig_port->base.base.dev);
7182 struct intel_dp *intel_dp = &dig_port->dp;
7183
7184 if (dig_port->base.type == INTEL_OUTPUT_EDP &&
7185 (long_hpd || !intel_edp_have_power(intel_dp))) {
7186 /*
7187 * vdd off can generate a long/short pulse on eDP which
7188 * would require vdd on to handle it, and thus we
7189 * would end up in an endless cycle of
7190 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
7191 */
7192 drm_dbg_kms(&i915->drm,
7193 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
7194 long_hpd ? "long" : "short",
7195 dig_port->base.base.base.id,
7196 dig_port->base.base.name);
7197 return IRQ_HANDLED;
7198 }
7199
7200 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
7201 dig_port->base.base.base.id,
7202 dig_port->base.base.name,
7203 long_hpd ? "long" : "short");
7204
7205 if (long_hpd) {
7206 intel_dp->reset_link_params = true;
7207 return IRQ_NONE;
7208 }
7209
7210 if (intel_dp->is_mst) {
7211 if (!intel_dp_check_mst_status(intel_dp))
7212 return IRQ_NONE;
7213 } else if (!intel_dp_short_pulse(intel_dp)) {
7214 return IRQ_NONE;
7215 }
7216
7217 return IRQ_HANDLED;
7218 }
7219
7220 /* check the VBT to see whether the eDP is on another port */
7221 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
7222 {
7223 /*
7224 * eDP is not supported on g4x, so bail out early just
7225 * for a bit of extra safety in case the VBT is bonkers.
7226 */
7227 if (INTEL_GEN(dev_priv) < 5)
7228 return false;
7229
7230 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
7231 return true;
7232
7233 return intel_bios_is_port_edp(dev_priv, port);
7234 }
7235
7236 static void
7237 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
7238 {
7239 struct drm_i915_private *dev_priv = to_i915(connector->dev);
7240 enum port port = dp_to_dig_port(intel_dp)->base.port;
7241
7242 if (!intel_dp_is_edp(intel_dp))
7243 drm_connector_attach_dp_subconnector_property(connector);
7244
7245 if (!IS_G4X(dev_priv) && port != PORT_A)
7246 intel_attach_force_audio_property(connector);
7247
7248 intel_attach_broadcast_rgb_property(connector);
7249 if (HAS_GMCH(dev_priv))
7250 drm_connector_attach_max_bpc_property(connector, 6, 10);
7251 else if (INTEL_GEN(dev_priv) >= 5)
7252 drm_connector_attach_max_bpc_property(connector, 6, 12);
7253
7254 intel_attach_colorspace_property(connector);
7255
7256 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
7257 drm_object_attach_property(&connector->base,
7258 connector->dev->mode_config.hdr_output_metadata_property,
7259 0);
7260
7261 if (intel_dp_is_edp(intel_dp)) {
7262 u32 allowed_scalers;
7263
7264 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
7265 if (!HAS_GMCH(dev_priv))
7266 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
7267
7268 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
7269
7270 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
7271
7272 }
7273 }
7274
7275 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
7276 {
7277 intel_dp->panel_power_off_time = ktime_get_boottime();
7278 intel_dp->last_power_on = jiffies;
7279 intel_dp->last_backlight_off = jiffies;
7280 }
7281
7282 static void
7283 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
7284 {
7285 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7286 u32 pp_on, pp_off, pp_ctl;
7287 struct pps_registers regs;
7288
7289 intel_pps_get_registers(intel_dp, &regs);
7290
7291 pp_ctl =
ilk_get_pp_control(intel_dp);
7292
7293 /* Ensure PPS is unlocked */
7294 if (!HAS_DDI(dev_priv))
7295 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
7296
7297 pp_on = intel_de_read(dev_priv, regs.pp_on);
7298 pp_off = intel_de_read(dev_priv, regs.pp_off);
7299
7300 /* Pull timing values out of registers */
7301 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
7302 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
7303 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
7304 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
7305
7306 if (i915_mmio_reg_valid(regs.pp_div)) {
7307 u32 pp_div;
7308
7309 pp_div = intel_de_read(dev_priv, regs.pp_div);
7310
7311 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
7312 } else {
7313 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
7314 }
7315 }
7316
7317 static void
7318 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
7319 {
7320 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
7321 state_name,
7322 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
7323 }
7324
7325 static void
7326 intel_pps_verify_state(struct intel_dp *intel_dp)
7327 {
7328 struct edp_power_seq hw;
7329 struct edp_power_seq *sw = &intel_dp->pps_delays;
7330
7331 intel_pps_readout_hw_state(intel_dp, &hw);
7332
7333 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
7334 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
7335 DRM_ERROR("PPS state mismatch\n");
7336 intel_pps_dump_state("sw", sw);
7337 intel_pps_dump_state("hw", &hw);
7338 }
7339 }
7340
7341 static void
7342 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
7343 {
7344 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7345 struct edp_power_seq cur, vbt, spec,
7346 *final = &intel_dp->pps_delays;
7347
7348 lockdep_assert_held(&dev_priv->pps_mutex);
7349
7350 /* already initialized? */
7351 if (final->t11_t12 != 0)
7352 return;
7353
7354 intel_pps_readout_hw_state(intel_dp, &cur);
7355
7356 intel_pps_dump_state("cur", &cur);
7357
7358 vbt = dev_priv->vbt.edp.pps;
7359 /* On the Toshiba Satellite P50-C-18C the VBT T12 delay
7360 * of 500ms appears to be too short. Occasionally the panel
7361 * just fails to power back on. Increasing the delay to 800ms
7362 * seems sufficient to avoid this problem.
7363 */
7364 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
7365 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
7366 drm_dbg_kms(&dev_priv->drm,
7367 "Increasing T12 panel delay as per the quirk to %d\n",
7368 vbt.t11_t12);
7369 }
7370 /* T11_T12 delay is special and actually in units of 100ms, but zero
7371 * based in the hw (so we need to add 100 ms). But the sw vbt
7372 * table multiplies it with 1000 to make it in units of 100usec,
7373 * too. */
7374 vbt.t11_t12 += 100 * 10;
7375
7376 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
7377 * our hw here, which are all in 100usec. */
7378 spec.t1_t3 = 210 * 10;
7379 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
7380 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
7381 spec.t10 = 500 * 10;
7382 /* This one is special and actually in units of 100ms, but zero
7383 * based in the hw (so we need to add 100 ms). But the sw vbt
7384 * table multiplies it with 1000 to make it in units of 100usec,
7385 * too.
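 * Worked example: the 510 ms limit below is stored as
 * (510 + 100) * 10 = 6100, i.e. 610 ms expressed in 100 us units
 * once the 100 ms zero-based offset is added.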
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay helps, so
	 * the quirk enforces a minimum of 1300ms.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
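/*
 * The assign_final() policy above is easiest to see with a few concrete
 * (illustrative) numbers, all in the driver's 100 us unit:
 *
 *	cur.t1_t3 = 0,    vbt.t1_t3 = 2100 -> final->t1_t3 = 2100 (vbt wins)
 *	cur.t10   = 5000, vbt.t10   = 4000 -> final->t10   = 5000 (cur wins)
 *	cur.t8    = 0,    vbt.t8    = 0    -> final->t8    = spec.t8 = 500
 *
 * i.e. the larger of the BIOS-programmed registers and the VBT is used,
 * and only if both are zero do the eDP spec limits apply.
 */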
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}
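/*
 * The PP_DIV programming above is easier to sanity-check with numbers.
 * Assuming (for illustration) a 24 MHz raw clock, i.e. rawclk_freq of
 * 24000 kHz, and t11_t12 = 6000 (600 ms in 100 us units):
 *
 *	div               = 24000 / 1000             = 24
 *	reference divider = (100 * 24) / 2 - 1       = 1199
 *	power cycle delay = DIV_ROUND_UP(6000, 1000) = 6 (100 ms units)
 *
 * which matches the Bspec formula the comment in the code refers to.
 */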
7558 */ 7559 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, 7560 const struct intel_crtc_state *crtc_state, 7561 int refresh_rate) 7562 { 7563 struct intel_dp *intel_dp = dev_priv->drrs.dp; 7564 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); 7565 enum drrs_refresh_rate_type index = DRRS_HIGH_RR; 7566 7567 if (refresh_rate <= 0) { 7568 drm_dbg_kms(&dev_priv->drm, 7569 "Refresh rate should be positive non-zero.\n"); 7570 return; 7571 } 7572 7573 if (intel_dp == NULL) { 7574 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); 7575 return; 7576 } 7577 7578 if (!intel_crtc) { 7579 drm_dbg_kms(&dev_priv->drm, 7580 "DRRS: intel_crtc not initialized\n"); 7581 return; 7582 } 7583 7584 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { 7585 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); 7586 return; 7587 } 7588 7589 if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == 7590 refresh_rate) 7591 index = DRRS_LOW_RR; 7592 7593 if (index == dev_priv->drrs.refresh_rate_type) { 7594 drm_dbg_kms(&dev_priv->drm, 7595 "DRRS requested for previously set RR...ignoring\n"); 7596 return; 7597 } 7598 7599 if (!crtc_state->hw.active) { 7600 drm_dbg_kms(&dev_priv->drm, 7601 "eDP encoder disabled. CRTC not Active\n"); 7602 return; 7603 } 7604 7605 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { 7606 switch (index) { 7607 case DRRS_HIGH_RR: 7608 intel_dp_set_m_n(crtc_state, M1_N1); 7609 break; 7610 case DRRS_LOW_RR: 7611 intel_dp_set_m_n(crtc_state, M2_N2); 7612 break; 7613 case DRRS_MAX_RR: 7614 default: 7615 drm_err(&dev_priv->drm, 7616 "Unsupported refreshrate type\n"); 7617 } 7618 } else if (INTEL_GEN(dev_priv) > 6) { 7619 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); 7620 u32 val; 7621 7622 val = intel_de_read(dev_priv, reg); 7623 if (index > DRRS_HIGH_RR) { 7624 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7625 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7626 else 7627 val |= PIPECONF_EDP_RR_MODE_SWITCH; 7628 } else { 7629 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 7630 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; 7631 else 7632 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 7633 } 7634 intel_de_write(dev_priv, reg, val); 7635 } 7636 7637 dev_priv->drrs.refresh_rate_type = index; 7638 7639 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", 7640 refresh_rate); 7641 } 7642 7643 /** 7644 * intel_edp_drrs_enable - init drrs struct if supported 7645 * @intel_dp: DP struct 7646 * @crtc_state: A pointer to the active crtc state. 7647 * 7648 * Initializes frontbuffer_bits and drrs.dp 7649 */ 7650 void intel_edp_drrs_enable(struct intel_dp *intel_dp, 7651 const struct intel_crtc_state *crtc_state) 7652 { 7653 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7654 7655 if (!crtc_state->has_drrs) { 7656 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); 7657 return; 7658 } 7659 7660 if (dev_priv->psr.enabled) { 7661 drm_dbg_kms(&dev_priv->drm, 7662 "PSR enabled. Not enabling DRRS.\n"); 7663 return; 7664 } 7665 7666 mutex_lock(&dev_priv->drrs.mutex); 7667 if (dev_priv->drrs.dp) { 7668 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); 7669 goto unlock; 7670 } 7671 7672 dev_priv->drrs.busy_frontbuffer_bits = 0; 7673 7674 dev_priv->drrs.dp = intel_dp; 7675 7676 unlock: 7677 mutex_unlock(&dev_priv->drrs.mutex); 7678 } 7679 7680 /** 7681 * intel_edp_drrs_disable - Disable DRRS 7682 * @intel_dp: DP struct 7683 * @old_crtc_state: Pointer to old crtc_state. 
7684 * 7685 */ 7686 void intel_edp_drrs_disable(struct intel_dp *intel_dp, 7687 const struct intel_crtc_state *old_crtc_state) 7688 { 7689 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 7690 7691 if (!old_crtc_state->has_drrs) 7692 return; 7693 7694 mutex_lock(&dev_priv->drrs.mutex); 7695 if (!dev_priv->drrs.dp) { 7696 mutex_unlock(&dev_priv->drrs.mutex); 7697 return; 7698 } 7699 7700 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7701 intel_dp_set_drrs_state(dev_priv, old_crtc_state, 7702 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); 7703 7704 dev_priv->drrs.dp = NULL; 7705 mutex_unlock(&dev_priv->drrs.mutex); 7706 7707 cancel_delayed_work_sync(&dev_priv->drrs.work); 7708 } 7709 7710 static void intel_edp_drrs_downclock_work(struct work_struct *work) 7711 { 7712 struct drm_i915_private *dev_priv = 7713 container_of(work, typeof(*dev_priv), drrs.work.work); 7714 struct intel_dp *intel_dp; 7715 7716 mutex_lock(&dev_priv->drrs.mutex); 7717 7718 intel_dp = dev_priv->drrs.dp; 7719 7720 if (!intel_dp) 7721 goto unlock; 7722 7723 /* 7724 * The delayed work can race with an invalidate hence we need to 7725 * recheck. 7726 */ 7727 7728 if (dev_priv->drrs.busy_frontbuffer_bits) 7729 goto unlock; 7730 7731 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { 7732 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7733 7734 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7735 drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); 7736 } 7737 7738 unlock: 7739 mutex_unlock(&dev_priv->drrs.mutex); 7740 } 7741 7742 /** 7743 * intel_edp_drrs_invalidate - Disable Idleness DRRS 7744 * @dev_priv: i915 device 7745 * @frontbuffer_bits: frontbuffer plane tracking bits 7746 * 7747 * This function gets called everytime rendering on the given planes start. 7748 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). 7749 * 7750 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7751 */ 7752 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, 7753 unsigned int frontbuffer_bits) 7754 { 7755 struct intel_dp *intel_dp; 7756 struct drm_crtc *crtc; 7757 enum pipe pipe; 7758 7759 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7760 return; 7761 7762 cancel_delayed_work(&dev_priv->drrs.work); 7763 7764 mutex_lock(&dev_priv->drrs.mutex); 7765 7766 intel_dp = dev_priv->drrs.dp; 7767 if (!intel_dp) { 7768 mutex_unlock(&dev_priv->drrs.mutex); 7769 return; 7770 } 7771 7772 crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7773 pipe = to_intel_crtc(crtc)->pipe; 7774 7775 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7776 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; 7777 7778 /* invalidate means busy screen hence upclock */ 7779 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7780 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7781 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); 7782 7783 mutex_unlock(&dev_priv->drrs.mutex); 7784 } 7785 7786 /** 7787 * intel_edp_drrs_flush - Restart Idleness DRRS 7788 * @dev_priv: i915 device 7789 * @frontbuffer_bits: frontbuffer plane tracking bits 7790 * 7791 * This function gets called every time rendering on the given planes has 7792 * completed or flip on a crtc is completed. So DRRS should be upclocked 7793 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, 7794 * if no other planes are dirty. 
7795 * 7796 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 7797 */ 7798 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, 7799 unsigned int frontbuffer_bits) 7800 { 7801 struct intel_dp *intel_dp; 7802 struct drm_crtc *crtc; 7803 enum pipe pipe; 7804 7805 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) 7806 return; 7807 7808 cancel_delayed_work(&dev_priv->drrs.work); 7809 7810 mutex_lock(&dev_priv->drrs.mutex); 7811 7812 intel_dp = dev_priv->drrs.dp; 7813 if (!intel_dp) { 7814 mutex_unlock(&dev_priv->drrs.mutex); 7815 return; 7816 } 7817 7818 crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 7819 pipe = to_intel_crtc(crtc)->pipe; 7820 7821 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); 7822 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; 7823 7824 /* flush means busy screen hence upclock */ 7825 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 7826 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, 7827 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); 7828 7829 /* 7830 * flush also means no more activity hence schedule downclock, if all 7831 * other fbs are quiescent too 7832 */ 7833 if (!dev_priv->drrs.busy_frontbuffer_bits) 7834 schedule_delayed_work(&dev_priv->drrs.work, 7835 msecs_to_jiffies(1000)); 7836 mutex_unlock(&dev_priv->drrs.mutex); 7837 } 7838 7839 /** 7840 * DOC: Display Refresh Rate Switching (DRRS) 7841 * 7842 * Display Refresh Rate Switching (DRRS) is a power conservation feature 7843 * which enables swtching between low and high refresh rates, 7844 * dynamically, based on the usage scenario. This feature is applicable 7845 * for internal panels. 7846 * 7847 * Indication that the panel supports DRRS is given by the panel EDID, which 7848 * would list multiple refresh rates for one resolution. 7849 * 7850 * DRRS is of 2 types - static and seamless. 7851 * Static DRRS involves changing refresh rate (RR) by doing a full modeset 7852 * (may appear as a blink on screen) and is used in dock-undock scenario. 7853 * Seamless DRRS involves changing RR without any visual effect to the user 7854 * and can be used during normal system usage. This is done by programming 7855 * certain registers. 7856 * 7857 * Support for static/seamless DRRS may be indicated in the VBT based on 7858 * inputs from the panel spec. 7859 * 7860 * DRRS saves power by switching to low RR based on usage scenarios. 7861 * 7862 * The implementation is based on frontbuffer tracking implementation. When 7863 * there is a disturbance on the screen triggered by user activity or a periodic 7864 * system activity, DRRS is disabled (RR is changed to high RR). When there is 7865 * no movement on screen, after a timeout of 1 second, a switch to low RR is 7866 * made. 7867 * 7868 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() 7869 * and intel_edp_drrs_flush() are called. 7870 * 7871 * DRRS can be further extended to support other internal panels and also 7872 * the scenario of video playback wherein RR is set based on the rate 7873 * requested by userspace. 7874 */ 7875 7876 /** 7877 * intel_dp_drrs_init - Init basic DRRS work and mutex. 7878 * @connector: eDP connector 7879 * @fixed_mode: preferred mode of panel 7880 * 7881 * This function is called only once at driver load to initialize basic 7882 * DRRS stuff. 7883 * 7884 * Returns: 7885 * Downclock mode if panel supports it, else return NULL. 
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS state.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of the downclock mode (apart
 * from the VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS only supported on Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS, bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for eDP. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fall back to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if
		 * that fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
							       dev_priv->vbt.orientation,
							       fixed_mode->hdisplay,
							       fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
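/*
 * Note that intel_connector->edid is deliberately overloaded above: it
 * holds a valid EDID, ERR_PTR(-EINVAL) for a corrupt one, or
 * ERR_PTR(-ENOENT) when none was read. Consumers therefore have to check
 * with IS_ERR() rather than for NULL. An illustrative sketch of such a
 * consumer:
 *
 *	if (!IS_ERR_OR_NULL(intel_connector->edid))
 *		num_modes = drm_add_edid_modes(connector,
 *					       intel_connector->edid);
 */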
8059 */ 8060 drm_connector_set_link_status_property(connector, 8061 DRM_MODE_LINK_STATUS_BAD); 8062 mutex_unlock(&connector->dev->mode_config.mutex); 8063 /* Send Hotplug uevent so userspace can reprobe */ 8064 drm_kms_helper_hotplug_event(connector->dev); 8065 } 8066 8067 bool 8068 intel_dp_init_connector(struct intel_digital_port *dig_port, 8069 struct intel_connector *intel_connector) 8070 { 8071 struct drm_connector *connector = &intel_connector->base; 8072 struct intel_dp *intel_dp = &dig_port->dp; 8073 struct intel_encoder *intel_encoder = &dig_port->base; 8074 struct drm_device *dev = intel_encoder->base.dev; 8075 struct drm_i915_private *dev_priv = to_i915(dev); 8076 enum port port = intel_encoder->port; 8077 enum phy phy = intel_port_to_phy(dev_priv, port); 8078 int type; 8079 8080 /* Initialize the work for modeset in case of link train failure */ 8081 INIT_WORK(&intel_connector->modeset_retry_work, 8082 intel_dp_modeset_retry_work_fn); 8083 8084 if (drm_WARN(dev, dig_port->max_lanes < 1, 8085 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 8086 dig_port->max_lanes, intel_encoder->base.base.id, 8087 intel_encoder->base.name)) 8088 return false; 8089 8090 intel_dp_set_source_rates(intel_dp); 8091 8092 intel_dp->reset_link_params = true; 8093 intel_dp->pps_pipe = INVALID_PIPE; 8094 intel_dp->active_pipe = INVALID_PIPE; 8095 8096 /* Preserve the current hw state. */ 8097 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 8098 intel_dp->attached_connector = intel_connector; 8099 8100 if (intel_dp_is_port_edp(dev_priv, port)) { 8101 /* 8102 * Currently we don't support eDP on TypeC ports, although in 8103 * theory it could work on TypeC legacy ports. 8104 */ 8105 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 8106 type = DRM_MODE_CONNECTOR_eDP; 8107 } else { 8108 type = DRM_MODE_CONNECTOR_DisplayPort; 8109 } 8110 8111 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 8112 intel_dp->active_pipe = vlv_active_pipe(intel_dp); 8113 8114 /* 8115 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 8116 * for DP the encoder type can be set by the caller to 8117 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 8118 */ 8119 if (type == DRM_MODE_CONNECTOR_eDP) 8120 intel_encoder->type = INTEL_OUTPUT_EDP; 8121 8122 /* eDP only on port B and/or C on vlv/chv */ 8123 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 8124 IS_CHERRYVIEW(dev_priv)) && 8125 intel_dp_is_edp(intel_dp) && 8126 port != PORT_B && port != PORT_C)) 8127 return false; 8128 8129 drm_dbg_kms(&dev_priv->drm, 8130 "Adding %s connector on [ENCODER:%d:%s]\n", 8131 type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", 8132 intel_encoder->base.base.id, intel_encoder->base.name); 8133 8134 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 8135 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 8136 8137 if (!HAS_GMCH(dev_priv)) 8138 connector->interlace_allowed = true; 8139 connector->doublescan_allowed = 0; 8140 8141 if (INTEL_GEN(dev_priv) >= 11) 8142 connector->ycbcr_420_allowed = true; 8143 8144 intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); 8145 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 8146 8147 intel_dp_aux_init(intel_dp); 8148 8149 intel_connector_attach_encoder(intel_connector, intel_encoder); 8150 8151 if (HAS_DDI(dev_priv)) 8152 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 8153 else 8154 intel_connector->get_hw_state = intel_connector_get_hw_state; 8155 8156 /* init MST on ports that can support it */ 8157 intel_dp_mst_encoder_init(dig_port, 8158 intel_connector->base.base.id); 8159 8160 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 8161 intel_dp_aux_fini(intel_dp); 8162 intel_dp_mst_encoder_cleanup(dig_port); 8163 goto fail; 8164 } 8165 8166 intel_dp_add_properties(intel_dp, connector); 8167 8168 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 8169 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); 8170 if (ret) 8171 drm_dbg_kms(&dev_priv->drm, 8172 "HDCP init failed, skipping.\n"); 8173 } 8174 8175 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 8176 * 0xd. Failure to do so will result in spurious interrupts being 8177 * generated on the port when a cable is not attached. 8178 */ 8179 if (IS_G45(dev_priv)) { 8180 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 8181 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 8182 (temp & ~0xf) | 0xd); 8183 } 8184 8185 return true; 8186 8187 fail: 8188 drm_connector_cleanup(connector); 8189 8190 return false; 8191 } 8192 8193 bool intel_dp_init(struct drm_i915_private *dev_priv, 8194 i915_reg_t output_reg, 8195 enum port port) 8196 { 8197 struct intel_digital_port *dig_port; 8198 struct intel_encoder *intel_encoder; 8199 struct drm_encoder *encoder; 8200 struct intel_connector *intel_connector; 8201 8202 dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); 8203 if (!dig_port) 8204 return false; 8205 8206 intel_connector = intel_connector_alloc(); 8207 if (!intel_connector) 8208 goto err_connector_alloc; 8209 8210 intel_encoder = &dig_port->base; 8211 encoder = &intel_encoder->base; 8212 8213 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, 8214 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, 8215 "DP %c", port_name(port))) 8216 goto err_encoder_init; 8217 8218 intel_encoder->hotplug = intel_dp_hotplug; 8219 intel_encoder->compute_config = intel_dp_compute_config; 8220 intel_encoder->get_hw_state = intel_dp_get_hw_state; 8221 intel_encoder->get_config = intel_dp_get_config; 8222 intel_encoder->update_pipe = intel_panel_update_backlight; 8223 intel_encoder->suspend = intel_dp_encoder_suspend; 8224 if (IS_CHERRYVIEW(dev_priv)) { 8225 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 8226 intel_encoder->pre_enable = chv_pre_enable_dp; 8227 intel_encoder->enable = vlv_enable_dp; 8228 intel_encoder->disable = vlv_disable_dp; 8229 intel_encoder->post_disable = chv_post_disable_dp; 8230 intel_encoder->post_pll_disable = chv_dp_post_pll_disable; 8231 } else if (IS_VALLEYVIEW(dev_priv)) { 8232 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; 8233 intel_encoder->pre_enable = 
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;
	dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
	dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}
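/*
 * intel_dp_mst_suspend() and intel_dp_mst_resume() (below) are meant to
 * be called as a pair from the driver's system suspend/resume paths, so
 * that MST topology state survives a suspend cycle. A minimal,
 * illustrative sketch of the call sites:
 *
 *	// on suspend, before powering down the display hardware
 *	intel_dp_mst_suspend(dev_priv);
 *
 *	// on resume, once the display hardware is back up
 *	intel_dp_mst_resume(dev_priv);
 */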
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}