1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 
 */

#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"

/* Log the raw 6-byte DPCD link status block for debugging. */
static void
intel_dp_dump_link_status(struct drm_device *drm,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	drm_dbg_kms(drm,
		    "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
		    link_status[0], link_status[1], link_status[2],
		    link_status[3], link_status[4], link_status[5]);
}

/* Clear all cached LTTPR common capabilities. */
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}

/*
 * Clear only the cached LTTPR repeater count, leaving the rest of the
 * common caps (e.g. any rate/lane-count limits) intact.
 */
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

/*
 * Format a human readable name for @dp_phy ("DPRX" or "LTTPR <n>") into
 * @buf, returning @buf for convenient use as a printk argument.
 */
static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
				     char *buf, size_t buf_size)
{
	if (dp_phy == DP_PHY_DPRX)
		snprintf(buf, buf_size, "DPRX");
	else
		snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);

	return buf;
}

/* Return the cached capability array of the given LTTPR PHY. */
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

/*
 * Read and cache the PHY capabilities of one LTTPR. A read failure is only
 * logged; the cached caps are left as they were in that case.
 */
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
	char phy_name[10];

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "failed to read the PHY caps for %s\n",
			    phy_name);
		return;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "%s PHY capabilities: %*ph\n",
		    phy_name,
		    (int)sizeof(intel_dp->lttpr_phy_caps[0]),
		    phy_caps);
}

/*
 * Read and validate the LTTPR common capabilities. Returns false - with the
 * cached caps cleared - if LTTPR detection must be skipped (eDP, too short
 * AUX timeout) or the read-out caps are invalid.
 */
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (intel_dp_is_edp(intel_dp))
		return false;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
		return false;

	if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
					  intel_dp->lttpr_common_caps) < 0)
		goto reset_caps;

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "LTTPR common capabilities: %*ph\n",
		    (int)sizeof(intel_dp->lttpr_common_caps),
		    intel_dp->lttpr_common_caps);

	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
	if (intel_dp->lttpr_common_caps[0] < 0x14)
		goto reset_caps;

	return true;

reset_caps:
	intel_dp_reset_lttpr_common_caps(intel_dp);
	return false;
}

/* Program transparent or non-transparent LT mode in the LTTPRs via DPCD. */
static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}

/*
 * Detect the LTTPRs on the link, switch them to non-transparent link
 * training mode if possible and read their PHY capabilities.
 *
 * Returns the number of detected LTTPRs in non-transparent mode, or 0 when
 * falling back to transparent (no-LTTPR) link training mode.
 */
static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
	int lttpr_count;
	int i;

	if (!intel_dp_read_lttpr_common_caps(intel_dp))
		return 0;

	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));

	return lttpr_count;
}

/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0 if LTTPRs were detected and the non-transparent LT mode was set. The
 *      DPRX capabilities are read out.
 *    0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *      detection failure and the transparent LT mode was set. The DPRX
 *      capabilities are read out.
 *   <0 Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	int lttpr_count = intel_dp_init_lttpr(intel_dp);

	/* The DPTX shall read the DPRX caps after LTTPR detection. */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);

/*
 * Return the max voltage swing level allowed in combination with the given
 * pre-emphasis level, per the DP spec's swing/pre-emphasis constraints.
 */
static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}

/* Max voltage swing supported by the given LTTPR, based on its PHY caps. */
static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

/* Max pre-emphasis supported by the given LTTPR, based on its PHY caps. */
static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
		return DP_TRAIN_PRE_EMPH_LEVEL_3;
	else
		return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

/*
 * Return true if the PHY immediately upstream of @dp_phy is the source
 * DPTX itself rather than another LTTPR (per this file's LTTPR numbering
 * the LTTPR adjacent to the source is DP_PHY_LTTPR(lttpr_count - 1)).
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}

/*
 * Return the max voltage swing to use when training @dp_phy, imposed by the
 * PHY (source or LTTPR) immediately upstream of it.
 */
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

/*
 * Return the max pre-emphasis to use when training @dp_phy, imposed by the
 * PHY (source or LTTPR) immediately upstream of it.
 */
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

/*
 * Compute the next per-lane drive settings from the adjustment requests in
 * @link_status, clamping them to the limits of the PHY upstream of @dp_phy,
 * and store them in intel_dp->train_set[].
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 v = 0;
	u8 p = 0;
	int lane;
	u8 voltage_max;
	u8 preemph_max;

	/* Use the highest level requested on any lane for all lanes. */
	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
		p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* The allowed swing additionally depends on the pre-emphasis level. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	/* All 4 train_set entries are programmed even with fewer active lanes. */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/* DPCD register used to set the training pattern for the given PHY. */
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
					     enum drm_dp_phy dp_phy)
{
	return dp_phy == DP_PHY_DPRX ?
		DP_TRAINING_PATTERN_SET :
		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

/*
 * Program @dp_train_pat on the source side and then - in one AUX write -
 * the pattern together with the current drive settings in @dp_phy's DPCD.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

/* Single character ('1'-'4', or '?' on an unknown pattern) for debug output. */
static char dp_training_pattern_name(u8 train_pat)
{
	switch (train_pat) {
	case DP_TRAINING_PATTERN_1:
	case DP_TRAINING_PATTERN_2:
	case DP_TRAINING_PATTERN_3:
		return '0' + train_pat;
	case DP_TRAINING_PATTERN_4:
		return '4';
	default:
		MISSING_CASE(train_pat);
		return '?';
	}
}

/* Program the given training pattern on the source side of the link. */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       u8 dp_train_pat)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
			    encoder->base.base.id, encoder->base.name,
			    dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}

/*
 * Log the current train_set[0] drive settings and program them on the
 * source side, the latter only if the source drives @dp_phy directly.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	char phy_name[10];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "",
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		intel_dp->set_signal_levels(intel_dp, crtc_state);
}

/* Zero the drive settings and start @dp_train_pat on @dp_phy. */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

/*
 * Re-program the current drive settings on the source and in @dp_phy's
 * DPCD, leaving the training pattern unchanged.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	int reg = dp_phy == DP_PHY_DPRX ?
			    DP_TRAINING_LANE0_SET :
			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
				intel_dp->train_set, crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}

/* True if all active lanes are already at the maximum voltage swing. */
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++)
		if ((intel_dp->train_set[lane] &
		     DP_TRAIN_MAX_SWING_REACHED) == 0)
			return false;

	return true;
}

/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 link_config[2];
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/* link_bw == 0 means the rate is selected via DP_LINK_RATE_SET below. */
	if (link_bw)
		drm_dbg_kms(&i915->drm,
			    "Using LINK_BW_SET value %02x\n", link_bw);
	else
		drm_dbg_kms(&i915->drm,
			    "Using LINK_RATE_SET value %02x\n", rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = crtc_state->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* eDP 1.4 rate select method. */
	if (!link_bw)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	intel_dp->DP |= DP_PORT_EN;

	return true;
}

/* Wait the mandated interval before polling the clock recovery status. */
static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
							enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
	else
		drm_dp_lttpr_link_train_clock_recovery_delay();
}

/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage;
	int voltage_tries, cr_tries, max_cr_tries;
	bool max_vswing_reached = false;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		drm_err(&i915->drm, "failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * x 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		u8 link_status[DP_LINK_STATUS_SIZE];

		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm, "failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
			return true;
		}

		/* Give up early if the sink keeps requesting the same voltage. */
		if (voltage_tries == 5) {
			drm_dbg_kms(&i915->drm,
				    "Same voltage tried 5 times\n");
			return false;
		}

		if (max_vswing_reached) {
			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
			return false;
		}

		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			return false;
		}

		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
		    voltage)
			++voltage_tries;
		else
			voltage_tries = 1;

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;

	}
	drm_err(&i915->drm,
		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
	return false;
}

/*
 * Pick training pattern for channel equalization. Training pattern 4 for HBR3
 * or for 1.4 devices that support it, training Pattern 3 for HBR2
 * or 1.2 devices that support it, Training Pattern 2 otherwise.
609 */ 610 static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, 611 const struct intel_crtc_state *crtc_state, 612 enum drm_dp_phy dp_phy) 613 { 614 bool source_tps3, sink_tps3, source_tps4, sink_tps4; 615 616 /* 617 * Intel platforms that support HBR3 also support TPS4. It is mandatory 618 * for all downstream devices that support HBR3. There are no known eDP 619 * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 620 * specification. 621 * LTTPRs must support TPS4. 622 */ 623 source_tps4 = intel_dp_source_supports_hbr3(intel_dp); 624 sink_tps4 = dp_phy != DP_PHY_DPRX || 625 drm_dp_tps4_supported(intel_dp->dpcd); 626 if (source_tps4 && sink_tps4) { 627 return DP_TRAINING_PATTERN_4; 628 } else if (crtc_state->port_clock == 810000) { 629 if (!source_tps4) 630 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 631 "8.1 Gbps link rate without source HBR3/TPS4 support\n"); 632 if (!sink_tps4) 633 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 634 "8.1 Gbps link rate without sink TPS4 support\n"); 635 } 636 /* 637 * Intel platforms that support HBR2 also support TPS3. TPS3 support is 638 * also mandatory for downstream devices that support HBR2. However, not 639 * all sinks follow the spec. 
640 */ 641 source_tps3 = intel_dp_source_supports_hbr2(intel_dp); 642 sink_tps3 = dp_phy != DP_PHY_DPRX || 643 drm_dp_tps3_supported(intel_dp->dpcd); 644 if (source_tps3 && sink_tps3) { 645 return DP_TRAINING_PATTERN_3; 646 } else if (crtc_state->port_clock >= 540000) { 647 if (!source_tps3) 648 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 649 ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n"); 650 if (!sink_tps3) 651 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 652 ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); 653 } 654 655 return DP_TRAINING_PATTERN_2; 656 } 657 658 static void 659 intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp, 660 enum drm_dp_phy dp_phy) 661 { 662 if (dp_phy == DP_PHY_DPRX) { 663 drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd); 664 } else { 665 const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); 666 667 drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps); 668 } 669 } 670 671 /* 672 * Perform the link training channel equalization phase on the given DP PHY 673 * using one of training pattern 2, 3 or 4 depending on the source and 674 * sink capabilities. 
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		drm_err(&i915->drm, "failed to start channel equalization\n");
		return false;
	}

	/* Poll the EQ status up to 5 times, adjusting drive settings between tries. */
	for (tries = 0; tries < 5; tries++) {
		intel_dp_link_training_channel_equalization_delay(intel_dp,
								  dp_phy);
		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm,
				"failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(&i915->drm, link_status);
			drm_dbg_kms(&i915->drm,
				    "Clock recovery check failed, cannot "
				    "continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
				    "successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(&i915->drm, link_status);
		drm_dbg_kms(&i915->drm,
			    "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}

/* Disable the training pattern in the given PHY's DPCD. */
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
						   enum drm_dp_phy dp_phy)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 val = DP_TRAINING_PATTERN_DISABLE;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}

/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
771 */ 772 void intel_dp_stop_link_train(struct intel_dp *intel_dp, 773 const struct intel_crtc_state *crtc_state) 774 { 775 intel_dp->link_trained = true; 776 777 intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); 778 intel_dp_program_link_training_pattern(intel_dp, crtc_state, 779 DP_TRAINING_PATTERN_DISABLE); 780 } 781 782 static bool 783 intel_dp_link_train_phy(struct intel_dp *intel_dp, 784 const struct intel_crtc_state *crtc_state, 785 enum drm_dp_phy dp_phy) 786 { 787 struct intel_connector *intel_connector = intel_dp->attached_connector; 788 char phy_name[10]; 789 bool ret = false; 790 791 if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) 792 goto out; 793 794 if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy)) 795 goto out; 796 797 ret = true; 798 799 out: 800 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 801 "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n", 802 intel_connector->base.base.id, 803 intel_connector->base.name, 804 ret ? "passed" : "failed", 805 crtc_state->port_clock, crtc_state->lane_count, 806 intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name))); 807 808 return ret; 809 } 810 811 static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, 812 const struct intel_crtc_state *crtc_state) 813 { 814 struct intel_connector *intel_connector = intel_dp->attached_connector; 815 816 if (intel_dp->hobl_active) { 817 drm_dbg_kms(&dp_to_i915(intel_dp)->drm, 818 "Link Training failed with HOBL active, not enabling it from now on"); 819 intel_dp->hobl_failed = true; 820 } else if (intel_dp_get_link_train_fallback_values(intel_dp, 821 crtc_state->port_clock, 822 crtc_state->lane_count)) { 823 return; 824 } 825 826 /* Schedule a Hotplug Uevent to userspace to start modeset */ 827 schedule_work(&intel_connector->modeset_retry_work); 828 } 829 830 /* Perform the link training on all LTTPRs and the DPRX on a link. 
*/ 831 static bool 832 intel_dp_link_train_all_phys(struct intel_dp *intel_dp, 833 const struct intel_crtc_state *crtc_state, 834 int lttpr_count) 835 { 836 bool ret = true; 837 int i; 838 839 intel_dp_prepare_link_train(intel_dp, crtc_state); 840 841 for (i = lttpr_count - 1; i >= 0; i--) { 842 enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i); 843 844 ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy); 845 intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy); 846 847 if (!ret) 848 break; 849 } 850 851 if (ret) 852 intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); 853 854 if (intel_dp->set_idle_link_train) 855 intel_dp->set_idle_link_train(intel_dp, crtc_state); 856 857 return ret; 858 } 859 860 /** 861 * intel_dp_start_link_train - start link training 862 * @intel_dp: DP struct 863 * @crtc_state: state for CRTC attached to the encoder 864 * 865 * Start the link training of the @intel_dp port, scheduling a fallback 866 * retraining with reduced link rate/lane parameters if the link training 867 * fails. 868 * After calling this function intel_dp_stop_link_train() must be called. 869 */ 870 void intel_dp_start_link_train(struct intel_dp *intel_dp, 871 const struct intel_crtc_state *crtc_state) 872 { 873 /* 874 * TODO: Reiniting LTTPRs here won't be needed once proper connector 875 * HW state readout is added. 876 */ 877 int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); 878 879 if (lttpr_count < 0) 880 /* Still continue with enabling the port and link training. */ 881 lttpr_count = 0; 882 883 if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count)) 884 intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); 885 } 886