1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2020 Intel Corporation 4 */ 5 6 #include <linux/string_helpers.h> 7 8 #include "i915_reg.h" 9 #include "intel_atomic.h" 10 #include "intel_crtc.h" 11 #include "intel_ddi.h" 12 #include "intel_de.h" 13 #include "intel_display_types.h" 14 #include "intel_fdi.h" 15 #include "intel_fdi_regs.h" 16 17 struct intel_fdi_funcs { 18 void (*fdi_link_train)(struct intel_crtc *crtc, 19 const struct intel_crtc_state *crtc_state); 20 }; 21 22 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 23 enum pipe pipe, bool state) 24 { 25 bool cur_state; 26 27 if (HAS_DDI(dev_priv)) { 28 /* 29 * DDI does not have a specific FDI_TX register. 30 * 31 * FDI is never fed from EDP transcoder 32 * so pipe->transcoder cast is fine here. 33 */ 34 enum transcoder cpu_transcoder = (enum transcoder)pipe; 35 cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE; 36 } else { 37 cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; 38 } 39 I915_STATE_WARN(cur_state != state, 40 "FDI TX state assertion failure (expected %s, current %s)\n", 41 str_on_off(state), str_on_off(cur_state)); 42 } 43 44 void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe) 45 { 46 assert_fdi_tx(i915, pipe, true); 47 } 48 49 void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe) 50 { 51 assert_fdi_tx(i915, pipe, false); 52 } 53 54 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 55 enum pipe pipe, bool state) 56 { 57 bool cur_state; 58 59 cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; 60 I915_STATE_WARN(cur_state != state, 61 "FDI RX state assertion failure (expected %s, current %s)\n", 62 str_on_off(state), str_on_off(cur_state)); 63 } 64 65 void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe) 66 { 67 assert_fdi_rx(i915, pipe, true); 68 } 69 70 void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe) 71 { 72 assert_fdi_rx(i915, pipe, false); 73 } 74 75 void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, 76 enum pipe pipe) 77 { 78 bool cur_state; 79 80 /* ILK FDI PLL is always enabled */ 81 if (IS_IRONLAKE(i915)) 82 return; 83 84 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 85 if (HAS_DDI(i915)) 86 return; 87 88 cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; 89 I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n"); 90 } 91 92 static void assert_fdi_rx_pll(struct drm_i915_private *i915, 93 enum pipe pipe, bool state) 94 { 95 bool cur_state; 96 97 cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; 98 I915_STATE_WARN(cur_state != state, 99 "FDI RX PLL assertion failure (expected %s, current %s)\n", 100 str_on_off(state), str_on_off(cur_state)); 101 } 102 103 void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) 104 { 105 assert_fdi_rx_pll(i915, pipe, true); 106 } 107 108 void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe) 109 { 110 assert_fdi_rx_pll(i915, pipe, false); 111 } 112 113 void intel_fdi_link_train(struct intel_crtc *crtc, 114 const struct intel_crtc_state *crtc_state) 115 { 116 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 117 118 dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state); 119 } 120 121 /* units of 100MHz */ 122 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 123 { 124 if (crtc_state->hw.enable && crtc_state->has_pch_encoder) 125 return crtc_state->fdi_lanes; 126 127 return 0; 128 } 129 130 static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, 131 struct intel_crtc_state *pipe_config) 132 { 133 struct drm_i915_private *dev_priv = to_i915(dev); 134 struct drm_atomic_state *state = pipe_config->uapi.state; 135 struct intel_crtc *other_crtc; 136 struct intel_crtc_state *other_crtc_state; 137 138 drm_dbg_kms(&dev_priv->drm, 139 "checking fdi config on pipe %c, lanes %i\n", 140 pipe_name(pipe), pipe_config->fdi_lanes); 141 if (pipe_config->fdi_lanes > 4) { 142 drm_dbg_kms(&dev_priv->drm, 143 "invalid fdi lane config on pipe %c: %i lanes\n", 144 pipe_name(pipe), pipe_config->fdi_lanes); 145 return -EINVAL; 146 } 147 148 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 149 if (pipe_config->fdi_lanes > 2) { 150 drm_dbg_kms(&dev_priv->drm, 151 "only 2 lanes on haswell, required: %i lanes\n", 152 pipe_config->fdi_lanes); 153 return -EINVAL; 154 } else { 155 return 0; 156 } 157 } 158 159 if (INTEL_NUM_PIPES(dev_priv) == 2) 160 return 0; 161 162 /* Ivybridge 3 pipe is really complicated */ 163 switch (pipe) { 164 case PIPE_A: 165 return 0; 166 case PIPE_B: 167 if (pipe_config->fdi_lanes <= 2) 168 return 0; 169 170 other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C); 171 other_crtc_state = 172 intel_atomic_get_crtc_state(state, other_crtc); 173 if (IS_ERR(other_crtc_state)) 174 return PTR_ERR(other_crtc_state); 175 176 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 177 drm_dbg_kms(&dev_priv->drm, 178 "invalid shared fdi lane config on pipe %c: %i lanes\n", 179 pipe_name(pipe), pipe_config->fdi_lanes); 180 return -EINVAL; 181 } 182 return 0; 183 case PIPE_C: 184 if (pipe_config->fdi_lanes > 2) { 185 drm_dbg_kms(&dev_priv->drm, 186 "only 2 lanes on pipe %c: required %i lanes\n", 187 pipe_name(pipe), pipe_config->fdi_lanes); 188 return -EINVAL; 189 } 190 191 other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B); 192 other_crtc_state = 193 intel_atomic_get_crtc_state(state, other_crtc); 194 if (IS_ERR(other_crtc_state)) 195 return PTR_ERR(other_crtc_state); 196 197 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 198 drm_dbg_kms(&dev_priv->drm, 199 "fdi link B uses too many lanes to enable link C\n"); 200 return -EINVAL; 201 } 202 return 0; 203 default: 204 MISSING_CASE(pipe); 205 return 0; 206 } 207 } 208 209 void intel_fdi_pll_freq_update(struct drm_i915_private *i915) 210 { 211 if (IS_IRONLAKE(i915)) { 212 u32 fdi_pll_clk = 213 intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; 214 215 i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000; 216 } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) { 217 i915->display.fdi.pll_freq = 270000; 218 } else { 219 return; 220 } 221 222 drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq); 223 } 224 225 int intel_fdi_link_freq(struct drm_i915_private *i915, 226 const struct intel_crtc_state *pipe_config) 227 { 228 if (HAS_DDI(i915)) 229 return pipe_config->port_clock; /* SPLL */ 230 else 231 return i915->display.fdi.pll_freq; 232 } 233 234 int ilk_fdi_compute_config(struct intel_crtc *crtc, 235 struct intel_crtc_state *pipe_config) 236 { 237 struct drm_device *dev = crtc->base.dev; 238 struct drm_i915_private *i915 = to_i915(dev); 239 const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 240 int lane, link_bw, fdi_dotclock, ret; 241 bool needs_recompute = false; 242 243 retry: 244 /* FDI is a binary signal running at ~2.7GHz, encoding 245 * each output octet as 10 bits. The actual frequency 246 * is stored as a divider into a 100MHz clock, and the 247 * mode pixel clock is stored in units of 1KHz. 248 * Hence the bw of each lane in terms of the mode signal 249 * is: 250 */ 251 link_bw = intel_fdi_link_freq(i915, pipe_config); 252 253 fdi_dotclock = adjusted_mode->crtc_clock; 254 255 lane = ilk_get_lanes_required(fdi_dotclock, link_bw, 256 pipe_config->pipe_bpp); 257 258 pipe_config->fdi_lanes = lane; 259 260 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 261 link_bw, &pipe_config->fdi_m_n, false); 262 263 ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config); 264 if (ret == -EDEADLK) 265 return ret; 266 267 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 268 pipe_config->pipe_bpp -= 2*3; 269 drm_dbg_kms(&i915->drm, 270 "fdi link bw constraint, reducing pipe bpp to %i\n", 271 pipe_config->pipe_bpp); 272 needs_recompute = true; 273 pipe_config->bw_constrained = true; 274 275 goto retry; 276 } 277 278 if (needs_recompute) 279 return -EAGAIN; 280 281 return ret; 282 } 283 284 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) 285 { 286 u32 temp; 287 288 temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); 289 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 290 return; 291 292 drm_WARN_ON(&dev_priv->drm, 293 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & 294 FDI_RX_ENABLE); 295 drm_WARN_ON(&dev_priv->drm, 296 intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & 297 FDI_RX_ENABLE); 298 299 temp &= ~FDI_BC_BIFURCATION_SELECT; 300 if (enable) 301 temp |= FDI_BC_BIFURCATION_SELECT; 302 303 drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", 304 enable ? "en" : "dis"); 305 intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); 306 intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); 307 } 308 309 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) 310 { 311 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 312 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 313 314 switch (crtc->pipe) { 315 case PIPE_A: 316 break; 317 case PIPE_B: 318 if (crtc_state->fdi_lanes > 2) 319 cpt_set_fdi_bc_bifurcation(dev_priv, false); 320 else 321 cpt_set_fdi_bc_bifurcation(dev_priv, true); 322 323 break; 324 case PIPE_C: 325 cpt_set_fdi_bc_bifurcation(dev_priv, true); 326 327 break; 328 default: 329 MISSING_CASE(crtc->pipe); 330 } 331 } 332 333 void intel_fdi_normal_train(struct intel_crtc *crtc) 334 { 335 struct drm_device *dev = crtc->base.dev; 336 struct drm_i915_private *dev_priv = to_i915(dev); 337 enum pipe pipe = crtc->pipe; 338 i915_reg_t reg; 339 u32 temp; 340 341 /* enable normal train */ 342 reg = FDI_TX_CTL(pipe); 343 temp = intel_de_read(dev_priv, reg); 344 if (IS_IVYBRIDGE(dev_priv)) { 345 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 346 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 347 } else { 348 temp &= ~FDI_LINK_TRAIN_NONE; 349 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 350 } 351 intel_de_write(dev_priv, reg, temp); 352 353 reg = FDI_RX_CTL(pipe); 354 temp = intel_de_read(dev_priv, reg); 355 if (HAS_PCH_CPT(dev_priv)) { 356 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 357 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 358 } else { 359 temp &= ~FDI_LINK_TRAIN_NONE; 360 temp |= FDI_LINK_TRAIN_NONE; 361 } 362 intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 363 364 /* wait one idle pattern time */ 365 intel_de_posting_read(dev_priv, reg); 366 udelay(1000); 367 368 /* IVB wants error correction enabled */ 369 if (IS_IVYBRIDGE(dev_priv)) 370 intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); 371 } 372 373 /* The FDI link training functions for ILK/Ibexpeak. */ 374 static void ilk_fdi_link_train(struct intel_crtc *crtc, 375 const struct intel_crtc_state *crtc_state) 376 { 377 struct drm_device *dev = crtc->base.dev; 378 struct drm_i915_private *dev_priv = to_i915(dev); 379 enum pipe pipe = crtc->pipe; 380 i915_reg_t reg; 381 u32 temp, tries; 382 383 /* 384 * Write the TU size bits before fdi link training, so that error 385 * detection works. 386 */ 387 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), 388 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 389 390 /* FDI needs bits from pipe first */ 391 assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder); 392 393 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 394 for train result */ 395 reg = FDI_RX_IMR(pipe); 396 temp = intel_de_read(dev_priv, reg); 397 temp &= ~FDI_RX_SYMBOL_LOCK; 398 temp &= ~FDI_RX_BIT_LOCK; 399 intel_de_write(dev_priv, reg, temp); 400 intel_de_read(dev_priv, reg); 401 udelay(150); 402 403 /* enable CPU FDI TX and PCH FDI RX */ 404 reg = FDI_TX_CTL(pipe); 405 temp = intel_de_read(dev_priv, reg); 406 temp &= ~FDI_DP_PORT_WIDTH_MASK; 407 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 408 temp &= ~FDI_LINK_TRAIN_NONE; 409 temp |= FDI_LINK_TRAIN_PATTERN_1; 410 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); 411 412 reg = FDI_RX_CTL(pipe); 413 temp = intel_de_read(dev_priv, reg); 414 temp &= ~FDI_LINK_TRAIN_NONE; 415 temp |= FDI_LINK_TRAIN_PATTERN_1; 416 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); 417 418 intel_de_posting_read(dev_priv, reg); 419 udelay(150); 420 421 /* Ironlake workaround, enable clock pointer after FDI enable*/ 422 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), 423 FDI_RX_PHASE_SYNC_POINTER_OVR); 424 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), 425 FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN); 426 427 reg = FDI_RX_IIR(pipe); 428 for (tries = 0; tries < 5; tries++) { 429 temp = intel_de_read(dev_priv, reg); 430 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); 431 432 if ((temp & FDI_RX_BIT_LOCK)) { 433 drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n"); 434 intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK); 435 break; 436 } 437 } 438 if (tries == 5) 439 drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); 440 441 /* Train 2 */ 442 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 443 FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); 444 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), 445 FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); 446 intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); 447 udelay(150); 448 449 reg = FDI_RX_IIR(pipe); 450 for (tries = 0; tries < 5; tries++) { 451 temp = intel_de_read(dev_priv, reg); 452 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); 453 454 if (temp & FDI_RX_SYMBOL_LOCK) { 455 intel_de_write(dev_priv, reg, 456 temp | FDI_RX_SYMBOL_LOCK); 457 drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n"); 458 break; 459 } 460 } 461 if (tries == 5) 462 drm_err(&dev_priv->drm, "FDI train 2 fail!\n"); 463 464 drm_dbg_kms(&dev_priv->drm, "FDI train done\n"); 465 466 } 467 468 static const int snb_b_fdi_train_param[] = { 469 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 470 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 471 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 472 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 473 }; 474 475 /* The FDI link training functions for SNB/Cougarpoint. */ 476 static void gen6_fdi_link_train(struct intel_crtc *crtc, 477 const struct intel_crtc_state *crtc_state) 478 { 479 struct drm_device *dev = crtc->base.dev; 480 struct drm_i915_private *dev_priv = to_i915(dev); 481 enum pipe pipe = crtc->pipe; 482 i915_reg_t reg; 483 u32 temp, i, retry; 484 485 /* 486 * Write the TU size bits before fdi link training, so that error 487 * detection works. 488 */ 489 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), 490 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 491 492 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 493 for train result */ 494 reg = FDI_RX_IMR(pipe); 495 temp = intel_de_read(dev_priv, reg); 496 temp &= ~FDI_RX_SYMBOL_LOCK; 497 temp &= ~FDI_RX_BIT_LOCK; 498 intel_de_write(dev_priv, reg, temp); 499 500 intel_de_posting_read(dev_priv, reg); 501 udelay(150); 502 503 /* enable CPU FDI TX and PCH FDI RX */ 504 reg = FDI_TX_CTL(pipe); 505 temp = intel_de_read(dev_priv, reg); 506 temp &= ~FDI_DP_PORT_WIDTH_MASK; 507 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 508 temp &= ~FDI_LINK_TRAIN_NONE; 509 temp |= FDI_LINK_TRAIN_PATTERN_1; 510 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 511 /* SNB-B */ 512 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 513 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); 514 515 intel_de_write(dev_priv, FDI_RX_MISC(pipe), 516 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 517 518 reg = FDI_RX_CTL(pipe); 519 temp = intel_de_read(dev_priv, reg); 520 if (HAS_PCH_CPT(dev_priv)) { 521 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 522 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 523 } else { 524 temp &= ~FDI_LINK_TRAIN_NONE; 525 temp |= FDI_LINK_TRAIN_PATTERN_1; 526 } 527 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); 528 529 intel_de_posting_read(dev_priv, reg); 530 udelay(150); 531 532 for (i = 0; i < 4; i++) { 533 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 534 FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); 535 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 536 udelay(500); 537 538 for (retry = 0; retry < 5; retry++) { 539 reg = FDI_RX_IIR(pipe); 540 temp = intel_de_read(dev_priv, reg); 541 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); 542 if (temp & FDI_RX_BIT_LOCK) { 543 intel_de_write(dev_priv, reg, 544 temp | FDI_RX_BIT_LOCK); 545 drm_dbg_kms(&dev_priv->drm, 546 "FDI train 1 done.\n"); 547 break; 548 } 549 udelay(50); 550 } 551 if (retry < 5) 552 break; 553 } 554 if (i == 4) 555 drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); 556 557 /* Train 2 */ 558 reg = FDI_TX_CTL(pipe); 559 temp = intel_de_read(dev_priv, reg); 560 temp &= ~FDI_LINK_TRAIN_NONE; 561 temp |= FDI_LINK_TRAIN_PATTERN_2; 562 if (IS_SANDYBRIDGE(dev_priv)) { 563 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 564 /* SNB-B */ 565 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 566 } 567 intel_de_write(dev_priv, reg, temp); 568 569 reg = FDI_RX_CTL(pipe); 570 temp = intel_de_read(dev_priv, reg); 571 if (HAS_PCH_CPT(dev_priv)) { 572 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 573 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 574 } else { 575 temp &= ~FDI_LINK_TRAIN_NONE; 576 temp |= FDI_LINK_TRAIN_PATTERN_2; 577 } 578 intel_de_write(dev_priv, reg, temp); 579 580 intel_de_posting_read(dev_priv, reg); 581 udelay(150); 582 583 for (i = 0; i < 4; i++) { 584 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 585 FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); 586 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 587 udelay(500); 588 589 for (retry = 0; retry < 5; retry++) { 590 reg = FDI_RX_IIR(pipe); 591 temp = intel_de_read(dev_priv, reg); 592 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); 593 if (temp & FDI_RX_SYMBOL_LOCK) { 594 intel_de_write(dev_priv, reg, 595 temp | FDI_RX_SYMBOL_LOCK); 596 drm_dbg_kms(&dev_priv->drm, 597 "FDI train 2 done.\n"); 598 break; 599 } 600 udelay(50); 601 } 602 if (retry < 5) 603 break; 604 } 605 if (i == 4) 606 drm_err(&dev_priv->drm, "FDI train 2 fail!\n"); 607 608 drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); 609 } 610 611 /* Manual link training for Ivy Bridge A0 parts */ 612 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 613 const struct intel_crtc_state *crtc_state) 614 { 615 struct drm_device *dev = crtc->base.dev; 616 struct drm_i915_private *dev_priv = to_i915(dev); 617 enum pipe pipe = crtc->pipe; 618 i915_reg_t reg; 619 u32 temp, i, j; 620 621 ivb_update_fdi_bc_bifurcation(crtc_state); 622 623 /* 624 * Write the TU size bits before fdi link training, so that error 625 * detection works. 626 */ 627 intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), 628 intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 629 630 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 631 for train result */ 632 reg = FDI_RX_IMR(pipe); 633 temp = intel_de_read(dev_priv, reg); 634 temp &= ~FDI_RX_SYMBOL_LOCK; 635 temp &= ~FDI_RX_BIT_LOCK; 636 intel_de_write(dev_priv, reg, temp); 637 638 intel_de_posting_read(dev_priv, reg); 639 udelay(150); 640 641 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n", 642 intel_de_read(dev_priv, FDI_RX_IIR(pipe))); 643 644 /* Try each vswing and preemphasis setting twice before moving on */ 645 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 646 /* disable first in case we need to retry */ 647 reg = FDI_TX_CTL(pipe); 648 temp = intel_de_read(dev_priv, reg); 649 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 650 temp &= ~FDI_TX_ENABLE; 651 intel_de_write(dev_priv, reg, temp); 652 653 reg = FDI_RX_CTL(pipe); 654 temp = intel_de_read(dev_priv, reg); 655 temp &= ~FDI_LINK_TRAIN_AUTO; 656 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 657 temp &= ~FDI_RX_ENABLE; 658 intel_de_write(dev_priv, reg, temp); 659 660 /* enable CPU FDI TX and PCH FDI RX */ 661 reg = FDI_TX_CTL(pipe); 662 temp = intel_de_read(dev_priv, reg); 663 temp &= ~FDI_DP_PORT_WIDTH_MASK; 664 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 665 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 666 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 667 temp |= snb_b_fdi_train_param[j/2]; 668 temp |= FDI_COMPOSITE_SYNC; 669 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); 670 671 intel_de_write(dev_priv, FDI_RX_MISC(pipe), 672 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 673 674 reg = FDI_RX_CTL(pipe); 675 temp = intel_de_read(dev_priv, reg); 676 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 677 temp |= FDI_COMPOSITE_SYNC; 678 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); 679 680 intel_de_posting_read(dev_priv, reg); 681 udelay(1); /* should be 0.5us */ 682 683 for (i = 0; i < 4; i++) { 684 reg = FDI_RX_IIR(pipe); 685 temp = intel_de_read(dev_priv, reg); 686 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); 687 688 if (temp & FDI_RX_BIT_LOCK || 689 (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) { 690 intel_de_write(dev_priv, reg, 691 temp | FDI_RX_BIT_LOCK); 692 drm_dbg_kms(&dev_priv->drm, 693 "FDI train 1 done, level %i.\n", 694 i); 695 break; 696 } 697 udelay(1); /* should be 0.5us */ 698 } 699 if (i == 4) { 700 drm_dbg_kms(&dev_priv->drm, 701 "FDI train 1 fail on vswing %d\n", j / 2); 702 continue; 703 } 704 705 /* Train 2 */ 706 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 707 FDI_LINK_TRAIN_NONE_IVB, 708 FDI_LINK_TRAIN_PATTERN_2_IVB); 709 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), 710 FDI_LINK_TRAIN_PATTERN_MASK_CPT, 711 FDI_LINK_TRAIN_PATTERN_2_CPT); 712 intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); 713 udelay(2); /* should be 1.5us */ 714 715 for (i = 0; i < 4; i++) { 716 reg = FDI_RX_IIR(pipe); 717 temp = intel_de_read(dev_priv, reg); 718 drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); 719 720 if (temp & FDI_RX_SYMBOL_LOCK || 721 (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) { 722 intel_de_write(dev_priv, reg, 723 temp | FDI_RX_SYMBOL_LOCK); 724 drm_dbg_kms(&dev_priv->drm, 725 "FDI train 2 done, level %i.\n", 726 i); 727 goto train_done; 728 } 729 udelay(2); /* should be 1.5us */ 730 } 731 if (i == 4) 732 drm_dbg_kms(&dev_priv->drm, 733 "FDI train 2 fail on vswing %d\n", j / 2); 734 } 735 736 train_done: 737 drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); 738 } 739 740 /* Starting with Haswell, different DDI ports can work in FDI mode for 741 * connection to the PCH-located connectors. For this, it is necessary to train 742 * both the DDI port and PCH receiver for the desired DDI buffer settings. 743 * 744 * The recommended port to work in FDI mode is DDI E, which we use here. Also, 745 * please note that when FDI mode is active on DDI E, it shares 2 lines with 746 * DDI A (which is used for eDP) 747 */ 748 void hsw_fdi_link_train(struct intel_encoder *encoder, 749 const struct intel_crtc_state *crtc_state) 750 { 751 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 752 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 753 u32 temp, i, rx_ctl_val; 754 int n_entries; 755 756 encoder->get_buf_trans(encoder, crtc_state, &n_entries); 757 758 hsw_prepare_dp_ddi_buffers(encoder, crtc_state); 759 760 /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the 761 * mode set "sequence for CRT port" document: 762 * - TP1 to TP2 time with the default value 763 * - FDI delay to 90h 764 * 765 * WaFDIAutoLinkSetTimingOverrride:hsw 766 */ 767 intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), 768 FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 769 770 /* Enable the PCH Receiver FDI PLL */ 771 rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | 772 FDI_RX_PLL_ENABLE | 773 FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 774 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); 775 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); 776 udelay(220); 777 778 /* Switch from Rawclk to PCDclk */ 779 rx_ctl_val |= FDI_PCDCLK; 780 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); 781 782 /* Configure Port Clock Select */ 783 drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL); 784 intel_ddi_enable_clock(encoder, crtc_state); 785 786 /* Start the training iterating through available voltages and emphasis, 787 * testing each value twice. */ 788 for (i = 0; i < n_entries * 2; i++) { 789 /* Configure DP_TP_CTL with auto-training */ 790 intel_de_write(dev_priv, DP_TP_CTL(PORT_E), 791 DP_TP_CTL_FDI_AUTOTRAIN | 792 DP_TP_CTL_ENHANCED_FRAME_ENABLE | 793 DP_TP_CTL_LINK_TRAIN_PAT1 | 794 DP_TP_CTL_ENABLE); 795 796 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. 797 * DDI E does not support port reversal, the functionality is 798 * achieved on the PCH side in FDI_RX_CTL, so no need to set the 799 * port reversal bit */ 800 intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), 801 DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2)); 802 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); 803 804 udelay(600); 805 806 /* Program PCH FDI Receiver TU */ 807 intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); 808 809 /* Enable PCH FDI Receiver with auto-training */ 810 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; 811 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); 812 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); 813 814 /* Wait for FDI receiver lane calibration */ 815 udelay(30); 816 817 /* Unset FDI_RX_MISC pwrdn lanes */ 818 intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), 819 FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0); 820 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); 821 822 /* Wait for FDI auto training time */ 823 udelay(5); 824 825 temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E)); 826 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { 827 drm_dbg_kms(&dev_priv->drm, 828 "FDI link training done on step %d\n", i); 829 break; 830 } 831 832 /* 833 * Leave things enabled even if we failed to train FDI. 834 * Results in less fireworks from the state checker. 835 */ 836 if (i == n_entries * 2 - 1) { 837 drm_err(&dev_priv->drm, "FDI link training failed!\n"); 838 break; 839 } 840 841 rx_ctl_val &= ~FDI_RX_ENABLE; 842 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); 843 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); 844 845 intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); 846 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); 847 848 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ 849 intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0); 850 intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); 851 852 intel_wait_ddi_buf_idle(dev_priv, PORT_E); 853 854 /* Reset FDI_RX_MISC pwrdn lanes */ 855 intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), 856 FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 857 FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); 858 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); 859 } 860 861 /* Enable normal pixel sending for FDI */ 862 intel_de_write(dev_priv, DP_TP_CTL(PORT_E), 863 DP_TP_CTL_FDI_AUTOTRAIN | 864 DP_TP_CTL_LINK_TRAIN_NORMAL | 865 DP_TP_CTL_ENHANCED_FRAME_ENABLE | 866 DP_TP_CTL_ENABLE); 867 } 868 869 void hsw_fdi_disable(struct intel_encoder *encoder) 870 { 871 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 872 873 /* 874 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) 875 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, 876 * step 13 is the correct place for it. Step 18 is where it was 877 * originally before the BUN. 878 */ 879 intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0); 880 intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); 881 intel_wait_ddi_buf_idle(dev_priv, PORT_E); 882 intel_ddi_disable_clock(encoder); 883 intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), 884 FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 885 FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); 886 intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0); 887 intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0); 888 } 889 890 void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 891 { 892 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 893 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 894 enum pipe pipe = crtc->pipe; 895 i915_reg_t reg; 896 u32 temp; 897 898 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 899 reg = FDI_RX_CTL(pipe); 900 temp = intel_de_read(dev_priv, reg); 901 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 902 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 903 temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; 904 intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE); 905 906 intel_de_posting_read(dev_priv, reg); 907 udelay(200); 908 909 /* Switch from Rawclk to PCDclk */ 910 intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK); 911 intel_de_posting_read(dev_priv, reg); 912 udelay(200); 913 914 /* Enable CPU FDI TX PLL, always on for Ironlake */ 915 reg = FDI_TX_CTL(pipe); 916 temp = intel_de_read(dev_priv, reg); 917 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 918 intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE); 919 920 intel_de_posting_read(dev_priv, reg); 921 udelay(100); 922 } 923 } 924 925 void ilk_fdi_pll_disable(struct intel_crtc *crtc) 926 { 927 struct drm_device *dev = crtc->base.dev; 928 struct drm_i915_private *dev_priv = to_i915(dev); 929 enum pipe pipe = crtc->pipe; 930 931 /* Switch from PCDclk to Rawclk */ 932 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0); 933 934 /* Disable CPU FDI TX PLL */ 935 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0); 936 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 937 udelay(100); 938 939 /* Wait for the clocks to turn off. */ 940 intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0); 941 intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); 942 udelay(100); 943 } 944 945 void ilk_fdi_disable(struct intel_crtc *crtc) 946 { 947 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 948 enum pipe pipe = crtc->pipe; 949 i915_reg_t reg; 950 u32 temp; 951 952 /* disable CPU FDI tx and PCH FDI rx */ 953 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0); 954 intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 955 956 reg = FDI_RX_CTL(pipe); 957 temp = intel_de_read(dev_priv, reg); 958 temp &= ~(0x7 << 16); 959 temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; 960 intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); 961 962 intel_de_posting_read(dev_priv, reg); 963 udelay(100); 964 965 /* Ironlake workaround, disable clock pointer after downing FDI */ 966 if (HAS_PCH_IBX(dev_priv)) 967 intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), 968 FDI_RX_PHASE_SYNC_POINTER_OVR); 969 970 /* still set train pattern 1 */ 971 intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 972 FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1); 973 974 reg = FDI_RX_CTL(pipe); 975 temp = intel_de_read(dev_priv, reg); 976 if (HAS_PCH_CPT(dev_priv)) { 977 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 978 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 979 } else { 980 temp &= ~FDI_LINK_TRAIN_NONE; 981 temp |= FDI_LINK_TRAIN_PATTERN_1; 982 } 983 /* BPC in FDI rx is consistent with that in TRANSCONF */ 984 temp &= ~(0x07 << 16); 985 temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; 986 intel_de_write(dev_priv, reg, temp); 987 988 intel_de_posting_read(dev_priv, reg); 989 udelay(100); 990 } 991 992 static const struct intel_fdi_funcs ilk_funcs = { 993 .fdi_link_train = ilk_fdi_link_train, 994 }; 995 996 static const struct intel_fdi_funcs gen6_funcs = { 997 .fdi_link_train = gen6_fdi_link_train, 998 }; 999 1000 static const struct intel_fdi_funcs ivb_funcs = { 1001 .fdi_link_train = ivb_manual_fdi_link_train, 1002 }; 1003 1004 void 1005 intel_fdi_init_hook(struct drm_i915_private *dev_priv) 1006 { 1007 if (IS_IRONLAKE(dev_priv)) { 1008 dev_priv->display.funcs.fdi = &ilk_funcs; 1009 } else if (IS_SANDYBRIDGE(dev_priv)) { 1010 dev_priv->display.funcs.fdi = &gen6_funcs; 1011 } else if (IS_IVYBRIDGE(dev_priv)) { 1012 /* FIXME: detect B0+ stepping and use auto training */ 1013 dev_priv->display.funcs.fdi = &ivb_funcs; 1014 } 1015 } 1016