// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"

struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

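/*
 * Validate one pipe's FDI lane budget against what the other pipes are
 * using. The checks below mirror the hardware layout handled in this file:
 * at most 4 lanes total, only 2 on HSW/BDW, and on three-pipe IVB the
 * FDI B and FDI C links share lanes, so pipe B may use more than 2 lanes
 * only while pipe C does not use FDI at all, and pipe C is limited to 2
 * lanes (see the B/C bifurcation handling further down in this file).
 */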
static int
ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
		    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->fdi_pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->fdi_pll_freq;
}

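/*
 * Worked example for the lane computation below. ilk_get_lanes_required()
 * is defined outside this file; the assumption here is that it derates the
 * pixel bandwidth by ~5% for spread spectrum and divides by 8 payload bits
 * per link symbol per lane:
 *
 *   1920x1080@60: crtc_clock = 148500 kHz, pipe_bpp = 24,
 *   link_bw = 270000 (SNB/IVB FDI, see intel_fdi_pll_freq_update())
 *
 *   required  ~= 148500 * 24 * 1.05 ~= 3742200 kbit/s
 *   per lane   = 270000 * 8         =  2160000 kbit/s
 *   fdi_lanes  = DIV_ROUND_UP(3742200, 2160000) = 2
 */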
int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return -EAGAIN;

	return ret;
}

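/*
 * FDI B/C bifurcation (SOUTH_CHICKEN1 bit): when set, part of the FDI B
 * lane budget is routed to FDI C so that pipe C can drive a PCH port.
 * The bit may only be flipped while both the FDI B and FDI C receivers
 * are disabled, hence the WARN_ONs below.
 */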
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

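/*
 * Voltage-swing/pre-emphasis combinations stepped through by the SNB and
 * IVB training loops below; each entry is programmed into the
 * FDI_LINK_TRAIN_VOL_EMP_MASK field of FDI_TX_CTL.
 */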
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
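/*
 * Auto-training is not used here (see the FIXME in intel_fdi_init_hook());
 * instead each vswing/pre-emphasis level from snb_b_fdi_train_param[] is
 * tried twice, with both the CPU FDI TX and PCH FDI RX sides disabled
 * between attempts before moving on to the next level.
 */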
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
		temp &= ~DDI_BUF_CTL_ENABLE;
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 val;

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
	val &= ~FDI_RX_ENABLE;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);

	val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
	val &= ~DDI_BUF_CTL_ENABLE;
	intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);

	intel_wait_ddi_buf_idle(dev_priv, PORT_E);

	intel_ddi_disable_clock(encoder);

	val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);

	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
	val &= ~FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);

	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
	val &= ~FDI_RX_PLL_ENABLE;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}

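/*
 * FDI PLL bring-up, as implemented below: enable the PCH FDI RX PLL with
 * the port width and the BPC taken from PIPECONF, wait for warmup, switch
 * the receiver from Rawclk to PCDclk, and finally make sure the CPU FDI TX
 * PLL is running (it is left enabled once turned on).
 */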
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

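/*
 * Per-platform link training implementations. intel_fdi_init_hook() selects
 * one of these at driver init and intel_fdi_link_train() dispatches through
 * dev_priv->fdi_funcs.
 */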
static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->fdi_funcs = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->fdi_funcs = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->fdi_funcs = &ivb_funcs;
	}
}