1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 */

#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"

/* Log the raw 6-byte DPCD link status block for debugging. */
static void
intel_dp_dump_link_status(struct drm_device *drm,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	drm_dbg_kms(drm,
		    "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
		    link_status[0], link_status[1], link_status[2],
		    link_status[3], link_status[4], link_status[5]);
}

/*
 * Zero the repeater-count byte in the cached LTTPR common caps, so that a
 * subsequent drm_dp_lttpr_count() on the cache reports no LTTPRs.
 */
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

/*
 * Format a human-readable name ("DPRX" or "LTTPR <n>") for @dp_phy into
 * @buf and return @buf, for convenient inline use in logging calls.
 */
static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
				     char *buf, size_t buf_size)
{
	if (dp_phy == DP_PHY_DPRX)
		snprintf(buf, buf_size, "DPRX");
	else
		snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);

	return buf;
}

/* Return the cached PHY caps buffer for the given LTTPR PHY. */
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

/*
 * Read and cache the PHY capabilities of the given LTTPR PHY. On a DPCD
 * read failure only a debug message is logged; the cached caps are left
 * as they were.
 */
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
	char phy_name[10];

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "failed to read the PHY caps for %s\n",
			    phy_name);
		return;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "%s PHY capabilities: %*ph\n",
		    phy_name,
		    (int)sizeof(intel_dp->lttpr_phy_caps[0]),
		    phy_caps);
}

/*
 * Read and cache the LTTPR common capabilities. On failure the cache is
 * zeroed (i.e. "no LTTPRs") and false is returned.
 */
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
	if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
					  intel_dp->lttpr_common_caps) < 0) {
		memset(intel_dp->lttpr_common_caps, 0,
		       sizeof(intel_dp->lttpr_common_caps));
		return false;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "LTTPR common capabilities: %*ph\n",
		    (int)sizeof(intel_dp->lttpr_common_caps),
		    intel_dp->lttpr_common_caps);

	return true;
}

/*
 * Switch the LTTPRs between transparent and non-transparent link training
 * mode via the DP_PHY_REPEATER_MODE DPCD register. Returns true iff the
 * single-byte DPCD write succeeded.
 */
static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}

/**
 * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common capabilities, switch to non-transparent link training
 * mode if any is detected and read the PHY capabilities for all detected
 * LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0 if LTTPRs were detected and the non-transparent LT mode was set
 *    0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *      detection failure and the transparent LT mode was set
 */
int intel_dp_lttpr_init(struct intel_dp *intel_dp)
{
	int lttpr_count;
	bool ret;
	int i;

	/* LTTPR handling does not apply to eDP panels. */
	if (intel_dp_is_edp(intel_dp))
		return 0;

	ret = intel_dp_read_lttpr_common_caps(intel_dp);
	if (!ret)
		return 0;

	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		/* Restore transparent mode and forget the detected LTTPRs. */
		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));

	return lttpr_count;
}
/* NOTE(review): exporting an i915-internal helper is unusual -- confirm this export is actually needed. */
EXPORT_SYMBOL(intel_dp_lttpr_init);

/*
 * Return the max voltage swing level allowed at the given pre-emphasis
 * level; the table below encodes that a higher pre-emphasis level lowers
 * the max allowed voltage swing.
 */
static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}

/* Max voltage swing supported by the given LTTPR PHY (level 3 is optional). */
static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

/* Max pre-emphasis supported by the given LTTPR PHY (level 3 is optional). */
static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
		return DP_TRAIN_PRE_EMPH_LEVEL_3;
	else
		return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

/*
 * Return whether @dp_phy is driven directly by the source DPTX, i.e. there
 * is no LTTPR between the source and @dp_phy.
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	/* With no LTTPRs detected only the DPRX PHY should be trained. */
	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}

/*
 * Max voltage swing allowed when training @dp_phy, taken from the PHY
 * upstream of it: the source if it directly drives @dp_phy, the upstream
 * LTTPR otherwise.
 */
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	/* Only levels 2 and 3 are valid maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

/*
 * Max pre-emphasis allowed when training @dp_phy, taken from the PHY
 * upstream of it (see intel_dp_phy_voltage_max()).
 */
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	/* Only levels 2 and 3 are valid maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

/*
 * Recompute intel_dp->train_set[] from the sink's adjustment requests in
 * @link_status, clamping the requested voltage swing / pre-emphasis to the
 * limits of the PHY upstream of @dp_phy and flagging levels that hit the
 * maximum. All 4 train_set entries get the same (highest requested) value.
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 v = 0;
	u8 p = 0;
	int lane;
	u8 voltage_max;
	u8 preemph_max;

	/* Use the highest level requested on any active lane. */
	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
		p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* The chosen pre-emphasis level limits the allowed voltage swing. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/* DPCD address of the TRAINING_PATTERN_SET register for the given PHY. */
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
					     enum drm_dp_phy dp_phy)
{
	return dp_phy == DP_PHY_DPRX ?
		DP_TRAINING_PATTERN_SET :
		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

/*
 * Program the training pattern on the source and write the pattern along
 * with the per-lane drive settings to the sink/LTTPR in a single DPCD
 * burst. Returns true iff the whole burst was written.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

/*
 * Log the current train_set levels and program the source's signal levels,
 * the latter only if the source directly drives @dp_phy.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	char phy_name[10];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "",
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		intel_dp->set_signal_levels(intel_dp, crtc_state);
}

/*
 * Start link training on @dp_phy from a zeroed train set with the given
 * training pattern.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

/*
 * Reprogram the source's signal levels and write the updated per-lane
 * drive settings to @dp_phy, without changing the training pattern.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	int reg = dp_phy == DP_PHY_DPRX ?
			    DP_TRAINING_LANE0_SET :
			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
				intel_dp->train_set, crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}

/* Return whether all active lanes are at their max voltage swing level. */
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++)
		if ((intel_dp->train_set[lane] &
		     DP_TRAIN_MAX_SWING_REACHED) == 0)
			return false;

	return true;
}

/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
398 */ 399 static bool 400 intel_dp_prepare_link_train(struct intel_dp *intel_dp, 401 const struct intel_crtc_state *crtc_state) 402 { 403 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 404 u8 link_config[2]; 405 u8 link_bw, rate_select; 406 407 if (intel_dp->prepare_link_retrain) 408 intel_dp->prepare_link_retrain(intel_dp, crtc_state); 409 410 intel_dp_compute_rate(intel_dp, crtc_state->port_clock, 411 &link_bw, &rate_select); 412 413 if (link_bw) 414 drm_dbg_kms(&i915->drm, 415 "Using LINK_BW_SET value %02x\n", link_bw); 416 else 417 drm_dbg_kms(&i915->drm, 418 "Using LINK_RATE_SET value %02x\n", rate_select); 419 420 /* Write the link configuration data */ 421 link_config[0] = link_bw; 422 link_config[1] = crtc_state->lane_count; 423 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 424 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 425 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); 426 427 /* eDP 1.4 rate select method. */ 428 if (!link_bw) 429 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, 430 &rate_select, 1); 431 432 link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0; 433 link_config[1] = DP_SET_ANSI_8B10B; 434 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); 435 436 intel_dp->DP |= DP_PORT_EN; 437 438 return true; 439 } 440 441 static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp, 442 enum drm_dp_phy dp_phy) 443 { 444 if (dp_phy == DP_PHY_DPRX) 445 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 446 else 447 drm_dp_lttpr_link_train_clock_recovery_delay(); 448 } 449 450 /* 451 * Perform the link training clock recovery phase on the given DP PHY using 452 * training pattern 1. 
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage;
	int voltage_tries, cr_tries, max_cr_tries;
	bool max_vswing_reached = false;

	/* clock recovery: start from a zeroed train set with TPS1 */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		drm_err(&i915->drm, "failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		u8 link_status[DP_LINK_STATUS_SIZE];

		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm, "failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
			return true;
		}

		/* Bail out if the same voltage was retried too many times. */
		if (voltage_tries == 5) {
			drm_dbg_kms(&i915->drm,
				    "Same voltage tried 5 times\n");
			return false;
		}

		/* Max swing was already reached on the previous iteration. */
		if (max_vswing_reached) {
			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
			return false;
		}

		/* Remember the vswing level before updating the train set. */
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			return false;
		}

		/* Count consecutive iterations using the same vswing level. */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
		    voltage)
			++voltage_tries;
		else
			voltage_tries = 1;

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;

	}
	drm_err(&i915->drm,
		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
	return false;
}

/*
 * Pick training pattern for channel equalization. Training pattern 4 for HBR3
 * or for 1.4 devices that support it, training Pattern 3 for HBR2
 * or 1.2 devices that support it, Training Pattern 2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/*
	 * Intel platforms that support HBR3 also support TPS4. It is mandatory
	 * for all downstream devices that support HBR3. There are no known eDP
	 * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
	 * specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without source HBR3/TPS4 support\n");
		if (!sink_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without sink TPS4 support\n");
	}
	/*
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2. However, not
	 * all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
		if (!sink_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}

/*
 * Wait the channel equalization interval appropriate for the PHY being
 * trained; LTTPRs advertise their interval in their PHY caps.
 */
static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
						  enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX) {
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
	} else {
		const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

		drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
	}
}

/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		drm_err(&i915->drm, "failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		intel_dp_link_training_channel_equalization_delay(intel_dp,
								  dp_phy);
		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm,
				"failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(&i915->drm, link_status);
			drm_dbg_kms(&i915->drm,
				    "Clock recovery check failed, cannot "
				    "continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
				    "successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(&i915->drm, link_status);
		drm_dbg_kms(&i915->drm,
			    "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}

/*
 * Clear the training pattern in the given PHY's DPCD, ending its link
 * training. Returns true iff the single-byte DPCD write succeeded.
 */
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
						   enum drm_dp_phy dp_phy)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 val = DP_TRAINING_PATTERN_DISABLE;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}

/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
705 */ 706 void intel_dp_stop_link_train(struct intel_dp *intel_dp, 707 const struct intel_crtc_state *crtc_state) 708 { 709 intel_dp->link_trained = true; 710 711 intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); 712 intel_dp_program_link_training_pattern(intel_dp, crtc_state, 713 DP_TRAINING_PATTERN_DISABLE); 714 } 715 716 static bool 717 intel_dp_link_train_phy(struct intel_dp *intel_dp, 718 const struct intel_crtc_state *crtc_state, 719 enum drm_dp_phy dp_phy) 720 { 721 struct intel_connector *intel_connector = intel_dp->attached_connector; 722 char phy_name[10]; 723 bool ret = false; 724 725 if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) 726 goto out; 727 728 if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy)) 729 goto out; 730 731 ret = true; 732 733 out: 734 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 735 "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n", 736 intel_connector->base.base.id, 737 intel_connector->base.name, 738 ret ? "passed" : "failed", 739 crtc_state->port_clock, crtc_state->lane_count, 740 intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name))); 741 742 return ret; 743 } 744 745 static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, 746 const struct intel_crtc_state *crtc_state) 747 { 748 struct intel_connector *intel_connector = intel_dp->attached_connector; 749 750 if (intel_dp->hobl_active) { 751 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 752 "Link Training failed with HOBL active, not enabling it from now on"); 753 intel_dp->hobl_failed = true; 754 } else if (intel_dp_get_link_train_fallback_values(intel_dp, 755 crtc_state->port_clock, 756 crtc_state->lane_count)) { 757 return; 758 } 759 760 /* Schedule a Hotplug Uevent to userspace to start modeset */ 761 schedule_work(&intel_connector->modeset_retry_work); 762 } 763 764 /* Perform the link training on all LTTPRs and the DPRX on a link. 
*/ 765 static bool 766 intel_dp_link_train_all_phys(struct intel_dp *intel_dp, 767 const struct intel_crtc_state *crtc_state, 768 int lttpr_count) 769 { 770 bool ret = true; 771 int i; 772 773 intel_dp_prepare_link_train(intel_dp, crtc_state); 774 775 for (i = lttpr_count - 1; i >= 0; i--) { 776 enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i); 777 778 ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy); 779 intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy); 780 781 if (!ret) 782 break; 783 } 784 785 if (ret) 786 intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); 787 788 if (intel_dp->set_idle_link_train) 789 intel_dp->set_idle_link_train(intel_dp, crtc_state); 790 791 return ret; 792 } 793 794 /** 795 * intel_dp_start_link_train - start link training 796 * @intel_dp: DP struct 797 * @crtc_state: state for CRTC attached to the encoder 798 * 799 * Start the link training of the @intel_dp port, scheduling a fallback 800 * retraining with reduced link rate/lane parameters if the link training 801 * fails. 802 * After calling this function intel_dp_stop_link_train() must be called. 803 */ 804 void intel_dp_start_link_train(struct intel_dp *intel_dp, 805 const struct intel_crtc_state *crtc_state) 806 { 807 /* 808 * TODO: Reiniting LTTPRs here won't be needed once proper connector 809 * HW state readout is added. 810 */ 811 int lttpr_count = intel_dp_lttpr_init(intel_dp); 812 813 if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count)) 814 intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); 815 } 816