// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"

static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	lpt_fdi_reset_mphy(dev_priv);

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

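/*
 * Scratch parameters for the iCLKIP divider math below. The virtual root
 * frequency is stored in kHz so it can be divided directly by
 * adjusted_mode->crtc_clock, which is also in kHz.
 */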
struct iclkip_params {
	u32 iclk_virtual_root_freq;
	u32 iclk_pi_range;
	u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
};

static void iclkip_params_init(struct iclkip_params *p)
{
	memset(p, 0, sizeof(*p));

	p->iclk_virtual_root_freq = 172800 * 1000;
	p->iclk_pi_range = 64;
}

static int lpt_iclkip_freq(struct iclkip_params *p)
{
	return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
				 p->desired_divisor << p->auxdiv);
}

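/*
 * Worked example (illustrative values only): for a 148500 kHz crtc_clock,
 * desired_divisor = DIV_ROUND_CLOSEST(172800000, 148500 << 0) = 1164, so
 * divsel = 1164 / 64 - 2 = 16 and phaseinc = 1164 % 64 = 12. That divsel
 * fits the 7-bit field, so auxdiv stays 0.
 */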
static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
{
	iclkip_params_init(p);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
		p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
						       clock << p->auxdiv);
		p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
		p->phaseinc = p->desired_divisor % p->iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (p->divsel <= 0x7f)
			break;
	}
}

/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	struct iclkip_params p;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	lpt_compute_iclkip(&p, clock);

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

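/*
 * Read back the currently programmed iCLKIP frequency. This reverses
 * lpt_compute_iclkip(): desired_divisor = (divsel + 2) * 64 + phaseinc.
 * With the illustrative values above (divsel = 16, phaseinc = 12,
 * auxdiv = 0) that gives 1164, and DIV_ROUND_CLOSEST(172800000, 1164) =
 * 148454 kHz, i.e. the readback can differ slightly from the programmed
 * 148500 kHz due to rounding.
 */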
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	struct iclkip_params p;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	iclkip_params_init(&p);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;

	return lpt_iclkip_freq(&p);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(dev_priv);
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
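 *
 * For example (illustrative): steps = -15 gives idx = BEND_IDX(-15) = 7,
 * which selects 0x0125 from the table above, and since -15 is not a
 * multiple of 10 the dither value 0xAAAAAAAB is programmed as well.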
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	BUG_ON(val != final);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}