1 /* 2 * Copyright © 2006-2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include "intel_display_types.h" 25 #include "intel_dpio_phy.h" 26 #include "intel_dpll_mgr.h" 27 28 /** 29 * DOC: Display PLLs 30 * 31 * Display PLLs used for driving outputs vary by platform. While some have 32 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL 33 * from a pool. In the latter scenario, it is possible that multiple pipes 34 * share a PLL if their configurations match. 35 * 36 * This file provides an abstraction over display PLLs. The function 37 * intel_shared_dpll_init() initializes the PLLs for the given platform. The 38 * users of a PLL are tracked and that tracking is integrated with the atomic 39 * modset interface. 
During an atomic operation, required PLLs can be reserved 40 * for a given CRTC and encoder configuration by calling 41 * intel_reserve_shared_dplls() and previously reserved PLLs can be released 42 * with intel_release_shared_dplls(). 43 * Changes to the users are first staged in the atomic state, and then made 44 * effective by calling intel_shared_dpll_swap_state() during the atomic 45 * commit phase. 46 */ 47 48 static void 49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, 50 struct intel_shared_dpll_state *shared_dpll) 51 { 52 enum intel_dpll_id i; 53 54 /* Copy shared dpll state */ 55 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 56 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 57 58 shared_dpll[i] = pll->state; 59 } 60 } 61 62 static struct intel_shared_dpll_state * 63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s) 64 { 65 struct intel_atomic_state *state = to_intel_atomic_state(s); 66 67 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex)); 68 69 if (!state->dpll_set) { 70 state->dpll_set = true; 71 72 intel_atomic_duplicate_dpll_state(to_i915(s->dev), 73 state->shared_dpll); 74 } 75 76 return state->shared_dpll; 77 } 78 79 /** 80 * intel_get_shared_dpll_by_id - get a DPLL given its id 81 * @dev_priv: i915 device instance 82 * @id: pll id 83 * 84 * Returns: 85 * A pointer to the DPLL with @id 86 */ 87 struct intel_shared_dpll * 88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, 89 enum intel_dpll_id id) 90 { 91 return &dev_priv->shared_dplls[id]; 92 } 93 94 /** 95 * intel_get_shared_dpll_id - get the id of a DPLL 96 * @dev_priv: i915 device instance 97 * @pll: the DPLL 98 * 99 * Returns: 100 * The id of @pll 101 */ 102 enum intel_dpll_id 103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, 104 struct intel_shared_dpll *pll) 105 { 106 if (WARN_ON(pll < dev_priv->shared_dplls|| 107 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll])) 108 return -1; 109 
110 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls); 111 } 112 113 /* For ILK+ */ 114 void assert_shared_dpll(struct drm_i915_private *dev_priv, 115 struct intel_shared_dpll *pll, 116 bool state) 117 { 118 bool cur_state; 119 struct intel_dpll_hw_state hw_state; 120 121 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) 122 return; 123 124 cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state); 125 I915_STATE_WARN(cur_state != state, 126 "%s assertion failure (expected %s, current %s)\n", 127 pll->info->name, onoff(state), onoff(cur_state)); 128 } 129 130 /** 131 * intel_prepare_shared_dpll - call a dpll's prepare hook 132 * @crtc_state: CRTC, and its state, which has a shared dpll 133 * 134 * This calls the PLL's prepare hook if it has one and if the PLL is not 135 * already enabled. The prepare hook is platform specific. 136 */ 137 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) 138 { 139 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 140 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 141 struct intel_shared_dpll *pll = crtc_state->shared_dpll; 142 143 if (WARN_ON(pll == NULL)) 144 return; 145 146 mutex_lock(&dev_priv->dpll_lock); 147 WARN_ON(!pll->state.crtc_mask); 148 if (!pll->active_mask) { 149 DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name); 150 WARN_ON(pll->on); 151 assert_shared_dpll_disabled(dev_priv, pll); 152 153 pll->info->funcs->prepare(dev_priv, pll); 154 } 155 mutex_unlock(&dev_priv->dpll_lock); 156 } 157 158 /** 159 * intel_enable_shared_dpll - enable a CRTC's shared DPLL 160 * @crtc_state: CRTC, and its state, which has a shared DPLL 161 * 162 * Enable the shared DPLL used by @crtc. 
163 */ 164 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) 165 { 166 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 167 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 168 struct intel_shared_dpll *pll = crtc_state->shared_dpll; 169 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 170 unsigned int old_mask; 171 172 if (WARN_ON(pll == NULL)) 173 return; 174 175 mutex_lock(&dev_priv->dpll_lock); 176 old_mask = pll->active_mask; 177 178 if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) || 179 WARN_ON(pll->active_mask & crtc_mask)) 180 goto out; 181 182 pll->active_mask |= crtc_mask; 183 184 DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n", 185 pll->info->name, pll->active_mask, pll->on, 186 crtc->base.base.id); 187 188 if (old_mask) { 189 WARN_ON(!pll->on); 190 assert_shared_dpll_enabled(dev_priv, pll); 191 goto out; 192 } 193 WARN_ON(pll->on); 194 195 DRM_DEBUG_KMS("enabling %s\n", pll->info->name); 196 pll->info->funcs->enable(dev_priv, pll); 197 pll->on = true; 198 199 out: 200 mutex_unlock(&dev_priv->dpll_lock); 201 } 202 203 /** 204 * intel_disable_shared_dpll - disable a CRTC's shared DPLL 205 * @crtc_state: CRTC, and its state, which has a shared DPLL 206 * 207 * Disable the shared DPLL used by @crtc. 208 */ 209 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) 210 { 211 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 213 struct intel_shared_dpll *pll = crtc_state->shared_dpll; 214 unsigned int crtc_mask = drm_crtc_mask(&crtc->base); 215 216 /* PCH only available on ILK+ */ 217 if (INTEL_GEN(dev_priv) < 5) 218 return; 219 220 if (pll == NULL) 221 return; 222 223 mutex_lock(&dev_priv->dpll_lock); 224 if (WARN_ON(!(pll->active_mask & crtc_mask))) 225 goto out; 226 227 DRM_DEBUG_KMS("disable %s (active %x, on? 
%d) for crtc %d\n", 228 pll->info->name, pll->active_mask, pll->on, 229 crtc->base.base.id); 230 231 assert_shared_dpll_enabled(dev_priv, pll); 232 WARN_ON(!pll->on); 233 234 pll->active_mask &= ~crtc_mask; 235 if (pll->active_mask) 236 goto out; 237 238 DRM_DEBUG_KMS("disabling %s\n", pll->info->name); 239 pll->info->funcs->disable(dev_priv, pll); 240 pll->on = false; 241 242 out: 243 mutex_unlock(&dev_priv->dpll_lock); 244 } 245 246 static struct intel_shared_dpll * 247 intel_find_shared_dpll(struct intel_atomic_state *state, 248 const struct intel_crtc *crtc, 249 const struct intel_dpll_hw_state *pll_state, 250 enum intel_dpll_id range_min, 251 enum intel_dpll_id range_max) 252 { 253 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 254 struct intel_shared_dpll *pll, *unused_pll = NULL; 255 struct intel_shared_dpll_state *shared_dpll; 256 enum intel_dpll_id i; 257 258 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); 259 260 for (i = range_min; i <= range_max; i++) { 261 pll = &dev_priv->shared_dplls[i]; 262 263 /* Only want to check enabled timings first */ 264 if (shared_dpll[i].crtc_mask == 0) { 265 if (!unused_pll) 266 unused_pll = pll; 267 continue; 268 } 269 270 if (memcmp(pll_state, 271 &shared_dpll[i].hw_state, 272 sizeof(*pll_state)) == 0) { 273 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", 274 crtc->base.base.id, crtc->base.name, 275 pll->info->name, 276 shared_dpll[i].crtc_mask, 277 pll->active_mask); 278 return pll; 279 } 280 } 281 282 /* Ok no matching timings, maybe there's a free one? 
*/ 283 if (unused_pll) { 284 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", 285 crtc->base.base.id, crtc->base.name, 286 unused_pll->info->name); 287 return unused_pll; 288 } 289 290 return NULL; 291 } 292 293 static void 294 intel_reference_shared_dpll(struct intel_atomic_state *state, 295 const struct intel_crtc *crtc, 296 const struct intel_shared_dpll *pll, 297 const struct intel_dpll_hw_state *pll_state) 298 { 299 struct intel_shared_dpll_state *shared_dpll; 300 const enum intel_dpll_id id = pll->info->id; 301 302 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); 303 304 if (shared_dpll[id].crtc_mask == 0) 305 shared_dpll[id].hw_state = *pll_state; 306 307 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name, 308 pipe_name(crtc->pipe)); 309 310 shared_dpll[id].crtc_mask |= 1 << crtc->pipe; 311 } 312 313 static void intel_unreference_shared_dpll(struct intel_atomic_state *state, 314 const struct intel_crtc *crtc, 315 const struct intel_shared_dpll *pll) 316 { 317 struct intel_shared_dpll_state *shared_dpll; 318 319 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); 320 shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe); 321 } 322 323 static void intel_put_dpll(struct intel_atomic_state *state, 324 struct intel_crtc *crtc) 325 { 326 const struct intel_crtc_state *old_crtc_state = 327 intel_atomic_get_old_crtc_state(state, crtc); 328 struct intel_crtc_state *new_crtc_state = 329 intel_atomic_get_new_crtc_state(state, crtc); 330 331 new_crtc_state->shared_dpll = NULL; 332 333 if (!old_crtc_state->shared_dpll) 334 return; 335 336 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll); 337 } 338 339 /** 340 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective 341 * @state: atomic state 342 * 343 * This is the dpll version of drm_atomic_helper_swap_state() since the 344 * helper does not handle driver-specific global state. 
345 * 346 * For consistency with atomic helpers this function does a complete swap, 347 * i.e. it also puts the current state into @state, even though there is no 348 * need for that at this moment. 349 */ 350 void intel_shared_dpll_swap_state(struct intel_atomic_state *state) 351 { 352 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 353 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll; 354 enum intel_dpll_id i; 355 356 if (!state->dpll_set) 357 return; 358 359 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 360 struct intel_shared_dpll *pll = 361 &dev_priv->shared_dplls[i]; 362 363 swap(pll->state, shared_dpll[i]); 364 } 365 } 366 367 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, 368 struct intel_shared_dpll *pll, 369 struct intel_dpll_hw_state *hw_state) 370 { 371 const enum intel_dpll_id id = pll->info->id; 372 intel_wakeref_t wakeref; 373 u32 val; 374 375 wakeref = intel_display_power_get_if_enabled(dev_priv, 376 POWER_DOMAIN_DISPLAY_CORE); 377 if (!wakeref) 378 return false; 379 380 val = I915_READ(PCH_DPLL(id)); 381 hw_state->dpll = val; 382 hw_state->fp0 = I915_READ(PCH_FP0(id)); 383 hw_state->fp1 = I915_READ(PCH_FP1(id)); 384 385 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); 386 387 return val & DPLL_VCO_ENABLE; 388 } 389 390 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv, 391 struct intel_shared_dpll *pll) 392 { 393 const enum intel_dpll_id id = pll->info->id; 394 395 I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0); 396 I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1); 397 } 398 399 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 400 { 401 u32 val; 402 bool enabled; 403 404 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 405 406 val = I915_READ(PCH_DREF_CONTROL); 407 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 408 DREF_SUPERSPREAD_SOURCE_MASK)); 409 I915_STATE_WARN(!enabled, 
"PCH refclk assertion failure, should be active but is disabled\n"); 410 } 411 412 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 413 struct intel_shared_dpll *pll) 414 { 415 const enum intel_dpll_id id = pll->info->id; 416 417 /* PCH refclock must be enabled first */ 418 ibx_assert_pch_refclk_enabled(dev_priv); 419 420 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll); 421 422 /* Wait for the clocks to stabilize. */ 423 POSTING_READ(PCH_DPLL(id)); 424 udelay(150); 425 426 /* The pixel multiplier can only be updated once the 427 * DPLL is enabled and the clocks are stable. 428 * 429 * So write it again. 430 */ 431 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll); 432 POSTING_READ(PCH_DPLL(id)); 433 udelay(200); 434 } 435 436 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, 437 struct intel_shared_dpll *pll) 438 { 439 const enum intel_dpll_id id = pll->info->id; 440 441 I915_WRITE(PCH_DPLL(id), 0); 442 POSTING_READ(PCH_DPLL(id)); 443 udelay(200); 444 } 445 446 static bool ibx_get_dpll(struct intel_atomic_state *state, 447 struct intel_crtc *crtc, 448 struct intel_encoder *encoder) 449 { 450 struct intel_crtc_state *crtc_state = 451 intel_atomic_get_new_crtc_state(state, crtc); 452 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 453 struct intel_shared_dpll *pll; 454 enum intel_dpll_id i; 455 456 if (HAS_PCH_IBX(dev_priv)) { 457 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
*/ 458 i = (enum intel_dpll_id) crtc->pipe; 459 pll = &dev_priv->shared_dplls[i]; 460 461 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", 462 crtc->base.base.id, crtc->base.name, 463 pll->info->name); 464 } else { 465 pll = intel_find_shared_dpll(state, crtc, 466 &crtc_state->dpll_hw_state, 467 DPLL_ID_PCH_PLL_A, 468 DPLL_ID_PCH_PLL_B); 469 } 470 471 if (!pll) 472 return false; 473 474 /* reference the pll */ 475 intel_reference_shared_dpll(state, crtc, 476 pll, &crtc_state->dpll_hw_state); 477 478 crtc_state->shared_dpll = pll; 479 480 return true; 481 } 482 483 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, 484 const struct intel_dpll_hw_state *hw_state) 485 { 486 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " 487 "fp0: 0x%x, fp1: 0x%x\n", 488 hw_state->dpll, 489 hw_state->dpll_md, 490 hw_state->fp0, 491 hw_state->fp1); 492 } 493 494 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { 495 .prepare = ibx_pch_dpll_prepare, 496 .enable = ibx_pch_dpll_enable, 497 .disable = ibx_pch_dpll_disable, 498 .get_hw_state = ibx_pch_dpll_get_hw_state, 499 }; 500 501 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, 502 struct intel_shared_dpll *pll) 503 { 504 const enum intel_dpll_id id = pll->info->id; 505 506 I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll); 507 POSTING_READ(WRPLL_CTL(id)); 508 udelay(20); 509 } 510 511 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, 512 struct intel_shared_dpll *pll) 513 { 514 I915_WRITE(SPLL_CTL, pll->state.hw_state.spll); 515 POSTING_READ(SPLL_CTL); 516 udelay(20); 517 } 518 519 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, 520 struct intel_shared_dpll *pll) 521 { 522 const enum intel_dpll_id id = pll->info->id; 523 u32 val; 524 525 val = I915_READ(WRPLL_CTL(id)); 526 I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); 527 POSTING_READ(WRPLL_CTL(id)); 528 } 529 530 static void hsw_ddi_spll_disable(struct drm_i915_private 
*dev_priv, 531 struct intel_shared_dpll *pll) 532 { 533 u32 val; 534 535 val = I915_READ(SPLL_CTL); 536 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); 537 POSTING_READ(SPLL_CTL); 538 } 539 540 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, 541 struct intel_shared_dpll *pll, 542 struct intel_dpll_hw_state *hw_state) 543 { 544 const enum intel_dpll_id id = pll->info->id; 545 intel_wakeref_t wakeref; 546 u32 val; 547 548 wakeref = intel_display_power_get_if_enabled(dev_priv, 549 POWER_DOMAIN_DISPLAY_CORE); 550 if (!wakeref) 551 return false; 552 553 val = I915_READ(WRPLL_CTL(id)); 554 hw_state->wrpll = val; 555 556 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); 557 558 return val & WRPLL_PLL_ENABLE; 559 } 560 561 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, 562 struct intel_shared_dpll *pll, 563 struct intel_dpll_hw_state *hw_state) 564 { 565 intel_wakeref_t wakeref; 566 u32 val; 567 568 wakeref = intel_display_power_get_if_enabled(dev_priv, 569 POWER_DOMAIN_DISPLAY_CORE); 570 if (!wakeref) 571 return false; 572 573 val = I915_READ(SPLL_CTL); 574 hw_state->spll = val; 575 576 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); 577 578 return val & SPLL_PLL_ENABLE; 579 } 580 581 #define LC_FREQ 2700 582 #define LC_FREQ_2K U64_C(LC_FREQ * 2000) 583 584 #define P_MIN 2 585 #define P_MAX 64 586 #define P_INC 2 587 588 /* Constraints for PLL good behavior */ 589 #define REF_MIN 48 590 #define REF_MAX 400 591 #define VCO_MIN 2400 592 #define VCO_MAX 4800 593 594 struct hsw_wrpll_rnp { 595 unsigned p, n2, r2; 596 }; 597 598 static unsigned hsw_wrpll_get_budget_for_freq(int clock) 599 { 600 unsigned budget; 601 602 switch (clock) { 603 case 25175000: 604 case 25200000: 605 case 27000000: 606 case 27027000: 607 case 37762500: 608 case 37800000: 609 case 40500000: 610 case 40541000: 611 case 54000000: 612 case 54054000: 613 case 59341000: 614 case 59400000: 615 case 72000000: 616 
case 74176000: 617 case 74250000: 618 case 81000000: 619 case 81081000: 620 case 89012000: 621 case 89100000: 622 case 108000000: 623 case 108108000: 624 case 111264000: 625 case 111375000: 626 case 148352000: 627 case 148500000: 628 case 162000000: 629 case 162162000: 630 case 222525000: 631 case 222750000: 632 case 296703000: 633 case 297000000: 634 budget = 0; 635 break; 636 case 233500000: 637 case 245250000: 638 case 247750000: 639 case 253250000: 640 case 298000000: 641 budget = 1500; 642 break; 643 case 169128000: 644 case 169500000: 645 case 179500000: 646 case 202000000: 647 budget = 2000; 648 break; 649 case 256250000: 650 case 262500000: 651 case 270000000: 652 case 272500000: 653 case 273750000: 654 case 280750000: 655 case 281250000: 656 case 286000000: 657 case 291750000: 658 budget = 4000; 659 break; 660 case 267250000: 661 case 268500000: 662 budget = 5000; 663 break; 664 default: 665 budget = 1000; 666 break; 667 } 668 669 return budget; 670 } 671 672 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget, 673 unsigned int r2, unsigned int n2, 674 unsigned int p, 675 struct hsw_wrpll_rnp *best) 676 { 677 u64 a, b, c, d, diff, diff_best; 678 679 /* No best (r,n,p) yet */ 680 if (best->p == 0) { 681 best->p = p; 682 best->n2 = n2; 683 best->r2 = r2; 684 return; 685 } 686 687 /* 688 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to 689 * freq2k. 690 * 691 * delta = 1e6 * 692 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) / 693 * freq2k; 694 * 695 * and we would like delta <= budget. 696 * 697 * If the discrepancy is above the PPM-based budget, always prefer to 698 * improve upon the previous solution. However, if you're within the 699 * budget, try to maximize Ref * VCO, that is N / (P * R^2). 
700 */ 701 a = freq2k * budget * p * r2; 702 b = freq2k * budget * best->p * best->r2; 703 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2); 704 diff_best = abs_diff(freq2k * best->p * best->r2, 705 LC_FREQ_2K * best->n2); 706 c = 1000000 * diff; 707 d = 1000000 * diff_best; 708 709 if (a < c && b < d) { 710 /* If both are above the budget, pick the closer */ 711 if (best->p * best->r2 * diff < p * r2 * diff_best) { 712 best->p = p; 713 best->n2 = n2; 714 best->r2 = r2; 715 } 716 } else if (a >= c && b < d) { 717 /* If A is below the threshold but B is above it? Update. */ 718 best->p = p; 719 best->n2 = n2; 720 best->r2 = r2; 721 } else if (a >= c && b >= d) { 722 /* Both are below the limit, so pick the higher n2/(r2*r2) */ 723 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) { 724 best->p = p; 725 best->n2 = n2; 726 best->r2 = r2; 727 } 728 } 729 /* Otherwise a < c && b >= d, do nothing */ 730 } 731 732 static void 733 hsw_ddi_calculate_wrpll(int clock /* in Hz */, 734 unsigned *r2_out, unsigned *n2_out, unsigned *p_out) 735 { 736 u64 freq2k; 737 unsigned p, n2, r2; 738 struct hsw_wrpll_rnp best = { 0, 0, 0 }; 739 unsigned budget; 740 741 freq2k = clock / 100; 742 743 budget = hsw_wrpll_get_budget_for_freq(clock); 744 745 /* Special case handling for 540 pixel clock: bypass WR PLL entirely 746 * and directly pass the LC PLL to it. */ 747 if (freq2k == 5400000) { 748 *n2_out = 2; 749 *p_out = 1; 750 *r2_out = 2; 751 return; 752 } 753 754 /* 755 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by 756 * the WR PLL. 757 * 758 * We want R so that REF_MIN <= Ref <= REF_MAX. 
759 * Injecting R2 = 2 * R gives: 760 * REF_MAX * r2 > LC_FREQ * 2 and 761 * REF_MIN * r2 < LC_FREQ * 2 762 * 763 * Which means the desired boundaries for r2 are: 764 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN 765 * 766 */ 767 for (r2 = LC_FREQ * 2 / REF_MAX + 1; 768 r2 <= LC_FREQ * 2 / REF_MIN; 769 r2++) { 770 771 /* 772 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R 773 * 774 * Once again we want VCO_MIN <= VCO <= VCO_MAX. 775 * Injecting R2 = 2 * R and N2 = 2 * N, we get: 776 * VCO_MAX * r2 > n2 * LC_FREQ and 777 * VCO_MIN * r2 < n2 * LC_FREQ) 778 * 779 * Which means the desired boundaries for n2 are: 780 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ 781 */ 782 for (n2 = VCO_MIN * r2 / LC_FREQ + 1; 783 n2 <= VCO_MAX * r2 / LC_FREQ; 784 n2++) { 785 786 for (p = P_MIN; p <= P_MAX; p += P_INC) 787 hsw_wrpll_update_rnp(freq2k, budget, 788 r2, n2, p, &best); 789 } 790 } 791 792 *n2_out = best.n2; 793 *p_out = best.p; 794 *r2_out = best.r2; 795 } 796 797 static struct intel_shared_dpll * 798 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, 799 struct intel_crtc *crtc) 800 { 801 struct intel_crtc_state *crtc_state = 802 intel_atomic_get_new_crtc_state(state, crtc); 803 struct intel_shared_dpll *pll; 804 u32 val; 805 unsigned int p, n2, r2; 806 807 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); 808 809 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | 810 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | 811 WRPLL_DIVIDER_POST(p); 812 813 crtc_state->dpll_hw_state.wrpll = val; 814 815 pll = intel_find_shared_dpll(state, crtc, 816 &crtc_state->dpll_hw_state, 817 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); 818 819 if (!pll) 820 return NULL; 821 822 return pll; 823 } 824 825 static struct intel_shared_dpll * 826 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) 827 { 828 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 829 struct intel_shared_dpll *pll; 830 enum intel_dpll_id pll_id; 831 int clock = 
crtc_state->port_clock; 832 833 switch (clock / 2) { 834 case 81000: 835 pll_id = DPLL_ID_LCPLL_810; 836 break; 837 case 135000: 838 pll_id = DPLL_ID_LCPLL_1350; 839 break; 840 case 270000: 841 pll_id = DPLL_ID_LCPLL_2700; 842 break; 843 default: 844 DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock); 845 return NULL; 846 } 847 848 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id); 849 850 if (!pll) 851 return NULL; 852 853 return pll; 854 } 855 856 static bool hsw_get_dpll(struct intel_atomic_state *state, 857 struct intel_crtc *crtc, 858 struct intel_encoder *encoder) 859 { 860 struct intel_crtc_state *crtc_state = 861 intel_atomic_get_new_crtc_state(state, crtc); 862 struct intel_shared_dpll *pll; 863 864 memset(&crtc_state->dpll_hw_state, 0, 865 sizeof(crtc_state->dpll_hw_state)); 866 867 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 868 pll = hsw_ddi_hdmi_get_dpll(state, crtc); 869 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 870 pll = hsw_ddi_dp_get_dpll(crtc_state); 871 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 872 if (WARN_ON(crtc_state->port_clock / 2 != 135000)) 873 return false; 874 875 crtc_state->dpll_hw_state.spll = 876 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; 877 878 pll = intel_find_shared_dpll(state, crtc, 879 &crtc_state->dpll_hw_state, 880 DPLL_ID_SPLL, DPLL_ID_SPLL); 881 } else { 882 return false; 883 } 884 885 if (!pll) 886 return false; 887 888 intel_reference_shared_dpll(state, crtc, 889 pll, &crtc_state->dpll_hw_state); 890 891 crtc_state->shared_dpll = pll; 892 893 return true; 894 } 895 896 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, 897 const struct intel_dpll_hw_state *hw_state) 898 { 899 DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", 900 hw_state->wrpll, hw_state->spll); 901 } 902 903 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { 904 .enable = hsw_ddi_wrpll_enable, 905 .disable = hsw_ddi_wrpll_disable, 906 .get_hw_state = 
hsw_ddi_wrpll_get_hw_state, 907 }; 908 909 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { 910 .enable = hsw_ddi_spll_enable, 911 .disable = hsw_ddi_spll_disable, 912 .get_hw_state = hsw_ddi_spll_get_hw_state, 913 }; 914 915 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv, 916 struct intel_shared_dpll *pll) 917 { 918 } 919 920 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv, 921 struct intel_shared_dpll *pll) 922 { 923 } 924 925 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv, 926 struct intel_shared_dpll *pll, 927 struct intel_dpll_hw_state *hw_state) 928 { 929 return true; 930 } 931 932 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = { 933 .enable = hsw_ddi_lcpll_enable, 934 .disable = hsw_ddi_lcpll_disable, 935 .get_hw_state = hsw_ddi_lcpll_get_hw_state, 936 }; 937 938 struct skl_dpll_regs { 939 i915_reg_t ctl, cfgcr1, cfgcr2; 940 }; 941 942 /* this array is indexed by the *shared* pll id */ 943 static const struct skl_dpll_regs skl_dpll_regs[4] = { 944 { 945 /* DPLL 0 */ 946 .ctl = LCPLL1_CTL, 947 /* DPLL 0 doesn't support HDMI mode */ 948 }, 949 { 950 /* DPLL 1 */ 951 .ctl = LCPLL2_CTL, 952 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1), 953 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1), 954 }, 955 { 956 /* DPLL 2 */ 957 .ctl = WRPLL_CTL(0), 958 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), 959 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), 960 }, 961 { 962 /* DPLL 3 */ 963 .ctl = WRPLL_CTL(1), 964 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), 965 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), 966 }, 967 }; 968 969 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, 970 struct intel_shared_dpll *pll) 971 { 972 const enum intel_dpll_id id = pll->info->id; 973 u32 val; 974 975 val = I915_READ(DPLL_CTRL1); 976 977 val &= ~(DPLL_CTRL1_HDMI_MODE(id) | 978 DPLL_CTRL1_SSC(id) | 979 DPLL_CTRL1_LINK_RATE_MASK(id)); 980 val |= pll->state.hw_state.ctrl1 << (id * 6); 981 982 I915_WRITE(DPLL_CTRL1, val); 983 
POSTING_READ(DPLL_CTRL1); 984 } 985 986 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, 987 struct intel_shared_dpll *pll) 988 { 989 const struct skl_dpll_regs *regs = skl_dpll_regs; 990 const enum intel_dpll_id id = pll->info->id; 991 992 skl_ddi_pll_write_ctrl1(dev_priv, pll); 993 994 I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1); 995 I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2); 996 POSTING_READ(regs[id].cfgcr1); 997 POSTING_READ(regs[id].cfgcr2); 998 999 /* the enable bit is always bit 31 */ 1000 I915_WRITE(regs[id].ctl, 1001 I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE); 1002 1003 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5)) 1004 DRM_ERROR("DPLL %d not locked\n", id); 1005 } 1006 1007 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, 1008 struct intel_shared_dpll *pll) 1009 { 1010 skl_ddi_pll_write_ctrl1(dev_priv, pll); 1011 } 1012 1013 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, 1014 struct intel_shared_dpll *pll) 1015 { 1016 const struct skl_dpll_regs *regs = skl_dpll_regs; 1017 const enum intel_dpll_id id = pll->info->id; 1018 1019 /* the enable bit is always bit 31 */ 1020 I915_WRITE(regs[id].ctl, 1021 I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE); 1022 POSTING_READ(regs[id].ctl); 1023 } 1024 1025 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, 1026 struct intel_shared_dpll *pll) 1027 { 1028 } 1029 1030 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, 1031 struct intel_shared_dpll *pll, 1032 struct intel_dpll_hw_state *hw_state) 1033 { 1034 u32 val; 1035 const struct skl_dpll_regs *regs = skl_dpll_regs; 1036 const enum intel_dpll_id id = pll->info->id; 1037 intel_wakeref_t wakeref; 1038 bool ret; 1039 1040 wakeref = intel_display_power_get_if_enabled(dev_priv, 1041 POWER_DOMAIN_DISPLAY_CORE); 1042 if (!wakeref) 1043 return false; 1044 1045 ret = false; 1046 1047 val = I915_READ(regs[id].ctl); 1048 if (!(val & 
LCPLL_PLL_ENABLE))
		goto out;

	val = I915_READ(DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}

/*
 * Read out the hardware state of DPLL0 into @hw_state.  Only the per-PLL
 * ctrl1 field of DPLL_CTRL1 is captured; cfgcr1/cfgcr2 are not read for
 * DPLL0.  Returns false if display power is off or the PLL is unexpectedly
 * disabled.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = I915_READ(regs[id].ctl);
	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* ctrl1 holds 6 bits of state per PLL, hence the id * 6 shift */
	val = I915_READ(DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}

/* Running best candidate while searching for WRPLL dividers. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* Reset the search context; min_deviation starts at U64_MAX so any
 * in-range candidate improves on it. */
static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
{
	memset(ctx, 0, sizeof(*ctx));

	ctx->min_deviation = U64_MAX;
}

/* DCO freq must be within +1%/-6% of the DCO central freq */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600

/*
 * Record @divider as the best candidate so far if @dco_freq deviates from
 * @central_freq by less than the allowed window and by less than the best
 * deviation found so far.  The deviation is computed in units of 0.01%
 * (hence the 10000 scale factor), matching the SKL_DCO_MAX_*DEVIATION
 * limits above.
 */
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1390 */ 1391 ctrl1 = DPLL_CTRL1_OVERRIDE(0); 1392 switch (crtc_state->port_clock / 2) { 1393 case 81000: 1394 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); 1395 break; 1396 case 135000: 1397 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0); 1398 break; 1399 case 270000: 1400 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0); 1401 break; 1402 /* eDP 1.4 rates */ 1403 case 162000: 1404 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); 1405 break; 1406 case 108000: 1407 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); 1408 break; 1409 case 216000: 1410 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0); 1411 break; 1412 } 1413 1414 memset(&crtc_state->dpll_hw_state, 0, 1415 sizeof(crtc_state->dpll_hw_state)); 1416 1417 crtc_state->dpll_hw_state.ctrl1 = ctrl1; 1418 1419 return true; 1420 } 1421 1422 static bool skl_get_dpll(struct intel_atomic_state *state, 1423 struct intel_crtc *crtc, 1424 struct intel_encoder *encoder) 1425 { 1426 struct intel_crtc_state *crtc_state = 1427 intel_atomic_get_new_crtc_state(state, crtc); 1428 struct intel_shared_dpll *pll; 1429 bool bret; 1430 1431 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 1432 bret = skl_ddi_hdmi_pll_dividers(crtc_state); 1433 if (!bret) { 1434 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); 1435 return false; 1436 } 1437 } else if (intel_crtc_has_dp_encoder(crtc_state)) { 1438 bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); 1439 if (!bret) { 1440 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); 1441 return false; 1442 } 1443 } else { 1444 return false; 1445 } 1446 1447 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) 1448 pll = intel_find_shared_dpll(state, crtc, 1449 &crtc_state->dpll_hw_state, 1450 DPLL_ID_SKL_DPLL0, 1451 DPLL_ID_SKL_DPLL0); 1452 else 1453 pll = intel_find_shared_dpll(state, crtc, 1454 &crtc_state->dpll_hw_state, 1455 DPLL_ID_SKL_DPLL1, 1456 DPLL_ID_SKL_DPLL3); 1457 if (!pll) 1458 return false; 1459 
1460 intel_reference_shared_dpll(state, crtc, 1461 pll, &crtc_state->dpll_hw_state); 1462 1463 crtc_state->shared_dpll = pll; 1464 1465 return true; 1466 } 1467 1468 static void skl_dump_hw_state(struct drm_i915_private *dev_priv, 1469 const struct intel_dpll_hw_state *hw_state) 1470 { 1471 DRM_DEBUG_KMS("dpll_hw_state: " 1472 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", 1473 hw_state->ctrl1, 1474 hw_state->cfgcr1, 1475 hw_state->cfgcr2); 1476 } 1477 1478 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { 1479 .enable = skl_ddi_pll_enable, 1480 .disable = skl_ddi_pll_disable, 1481 .get_hw_state = skl_ddi_pll_get_hw_state, 1482 }; 1483 1484 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { 1485 .enable = skl_ddi_dpll0_enable, 1486 .disable = skl_ddi_dpll0_disable, 1487 .get_hw_state = skl_ddi_dpll0_get_hw_state, 1488 }; 1489 1490 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, 1491 struct intel_shared_dpll *pll) 1492 { 1493 u32 temp; 1494 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ 1495 enum dpio_phy phy; 1496 enum dpio_channel ch; 1497 1498 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); 1499 1500 /* Non-SSC reference */ 1501 temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); 1502 temp |= PORT_PLL_REF_SEL; 1503 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1504 1505 if (IS_GEMINILAKE(dev_priv)) { 1506 temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); 1507 temp |= PORT_PLL_POWER_ENABLE; 1508 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1509 1510 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & 1511 PORT_PLL_POWER_STATE), 200)) 1512 DRM_ERROR("Power state not set for PLL:%d\n", port); 1513 } 1514 1515 /* Disable 10 bit clock */ 1516 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); 1517 temp &= ~PORT_PLL_10BIT_CLK_ENABLE; 1518 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); 1519 1520 /* Write P1 & P2 */ 1521 temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch)); 1522 temp &= ~(PORT_PLL_P1_MASK | 
PORT_PLL_P2_MASK); 1523 temp |= pll->state.hw_state.ebb0; 1524 I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp); 1525 1526 /* Write M2 integer */ 1527 temp = I915_READ(BXT_PORT_PLL(phy, ch, 0)); 1528 temp &= ~PORT_PLL_M2_MASK; 1529 temp |= pll->state.hw_state.pll0; 1530 I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp); 1531 1532 /* Write N */ 1533 temp = I915_READ(BXT_PORT_PLL(phy, ch, 1)); 1534 temp &= ~PORT_PLL_N_MASK; 1535 temp |= pll->state.hw_state.pll1; 1536 I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp); 1537 1538 /* Write M2 fraction */ 1539 temp = I915_READ(BXT_PORT_PLL(phy, ch, 2)); 1540 temp &= ~PORT_PLL_M2_FRAC_MASK; 1541 temp |= pll->state.hw_state.pll2; 1542 I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp); 1543 1544 /* Write M2 fraction enable */ 1545 temp = I915_READ(BXT_PORT_PLL(phy, ch, 3)); 1546 temp &= ~PORT_PLL_M2_FRAC_ENABLE; 1547 temp |= pll->state.hw_state.pll3; 1548 I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp); 1549 1550 /* Write coeff */ 1551 temp = I915_READ(BXT_PORT_PLL(phy, ch, 6)); 1552 temp &= ~PORT_PLL_PROP_COEFF_MASK; 1553 temp &= ~PORT_PLL_INT_COEFF_MASK; 1554 temp &= ~PORT_PLL_GAIN_CTL_MASK; 1555 temp |= pll->state.hw_state.pll6; 1556 I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp); 1557 1558 /* Write calibration val */ 1559 temp = I915_READ(BXT_PORT_PLL(phy, ch, 8)); 1560 temp &= ~PORT_PLL_TARGET_CNT_MASK; 1561 temp |= pll->state.hw_state.pll8; 1562 I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp); 1563 1564 temp = I915_READ(BXT_PORT_PLL(phy, ch, 9)); 1565 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; 1566 temp |= pll->state.hw_state.pll9; 1567 I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp); 1568 1569 temp = I915_READ(BXT_PORT_PLL(phy, ch, 10)); 1570 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; 1571 temp &= ~PORT_PLL_DCO_AMP_MASK; 1572 temp |= pll->state.hw_state.pll10; 1573 I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp); 1574 1575 /* Recalibrate with new settings */ 1576 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); 1577 temp |= PORT_PLL_RECALIBRATE; 1578 
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); 1579 temp &= ~PORT_PLL_10BIT_CLK_ENABLE; 1580 temp |= pll->state.hw_state.ebb4; 1581 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); 1582 1583 /* Enable PLL */ 1584 temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); 1585 temp |= PORT_PLL_ENABLE; 1586 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1587 POSTING_READ(BXT_PORT_PLL_ENABLE(port)); 1588 1589 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), 1590 200)) 1591 DRM_ERROR("PLL %d not locked\n", port); 1592 1593 if (IS_GEMINILAKE(dev_priv)) { 1594 temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch)); 1595 temp |= DCC_DELAY_RANGE_2; 1596 I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp); 1597 } 1598 1599 /* 1600 * While we write to the group register to program all lanes at once we 1601 * can read only lane registers and we pick lanes 0/1 for that. 1602 */ 1603 temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch)); 1604 temp &= ~LANE_STAGGER_MASK; 1605 temp &= ~LANESTAGGER_STRAP_OVRD; 1606 temp |= pll->state.hw_state.pcsdw12; 1607 I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp); 1608 } 1609 1610 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, 1611 struct intel_shared_dpll *pll) 1612 { 1613 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ 1614 u32 temp; 1615 1616 temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); 1617 temp &= ~PORT_PLL_ENABLE; 1618 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1619 POSTING_READ(BXT_PORT_PLL_ENABLE(port)); 1620 1621 if (IS_GEMINILAKE(dev_priv)) { 1622 temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); 1623 temp &= ~PORT_PLL_POWER_ENABLE; 1624 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1625 1626 if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) & 1627 PORT_PLL_POWER_STATE), 200)) 1628 DRM_ERROR("Power state not reset for PLL:%d\n", port); 1629 } 1630 } 1631 1632 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, 1633 struct intel_shared_dpll *pll, 1634 struct intel_dpll_hw_state 
				     *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
				 hw_state->pcsdw12,
				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}

/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* port clock this entry applies to, in kHz */
	u32 p1;
	u32 p2;
	u32 m2_int;		/* integer part of the M2 divider */
	u32 m2_frac;		/* 22-bit fractional part of M2 */
	bool m2_frac_en;
	u32 n;

	int vco;
};

/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27, 0, 0, 1},
	{540000, 2, 1, 27, 0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};

/*
 * Compute HDMI PLL dividers for @crtc_state via the generic i9xx best-dpll
 * search, splitting the resulting M2 divider into its integer and 22-bit
 * fractional parts.  Returns false if no divider combination is found.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
				 crtc_state->port_clock,
				 pipe_name(crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	WARN_ON(best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}

/*
 * Look up the pre-calculated divider set for a DP link rate.  Falls back
 * to the first table entry if the exact rate is not listed.
 */
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
				    struct bxt_clk_div *clk_div)
{
	int clock = crtc_state->port_clock;
	int i;

	*clk_div = bxt_dp_clk_val[0];
	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
		if (bxt_dp_clk_val[i].clock == clock) {
			*clk_div = bxt_dp_clk_val[i];
			break;
		}
	}

	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
}

/*
 * Translate @clk_div into the register-level PLL state in
 * crtc_state->dpll_hw_state.  The loop filter coefficients and lane
 * stagger value are selected from the VCO and port clock ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
		   (vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		DRM_ERROR("Invalid VCO\n");
		return false;
	}

	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}

static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}

static bool
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}

/*
 * Compute the DPLL state for @crtc and reference its dedicated PLL.
 * On BXT the PLLs are not shared between ports: each port has a fixed
 * 1:1 mapped PLL, so no search is needed.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
		      crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll,
				    &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}

static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
		      hw_state->ebb0,
		      hw_state->ebb4,
		      hw_state->pll0,
		      hw_state->pll1,
		      hw_state->pll2,
		      hw_state->pll3,
		      hw_state->pll6,
		      hw_state->pll8,
		      hw_state->pll9,
		      hw_state->pll10,
		      hw_state->pcsdw12);
}

static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
};

/*
 * Per-platform DPLL management vtable; one static instance exists per
 * supported platform (pch/hsw/skl/bxt/cnl/...).  update_active_dpll is
 * optional and only provided by platforms that need it.
 */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;

	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};

static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};

static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
	{ "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
	{ "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
	{ "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = hsw_dump_hw_state,
};

static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = skl_dump_hw_state,
};

static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = bxt_dump_hw_state,
};

/*
 * Enable a CNL DPLL following the numbered bspec sequence reproduced in
 * the comments below; the step ordering is mandated by hardware.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		DRM_ERROR("PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	I915_WRITE(CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	POSTING_READ(CNL_DPLL_CFGCR0(id));

	/* 3 (cont). Configure DPLL_CFGCR1 as well. */
	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		POSTING_READ(CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		DRM_ERROR("PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}

/*
 * Disable a CNL DPLL following the numbered bspec disable sequence;
 * the step ordering is mandated by hardware.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		DRM_ERROR("PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE.
*/ 2115 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), 2116 PLL_POWER_STATE, 5)) 2117 DRM_ERROR("PLL %d Power not disabled\n", id); 2118 } 2119 2120 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, 2121 struct intel_shared_dpll *pll, 2122 struct intel_dpll_hw_state *hw_state) 2123 { 2124 const enum intel_dpll_id id = pll->info->id; 2125 intel_wakeref_t wakeref; 2126 u32 val; 2127 bool ret; 2128 2129 wakeref = intel_display_power_get_if_enabled(dev_priv, 2130 POWER_DOMAIN_DISPLAY_CORE); 2131 if (!wakeref) 2132 return false; 2133 2134 ret = false; 2135 2136 val = I915_READ(CNL_DPLL_ENABLE(id)); 2137 if (!(val & PLL_ENABLE)) 2138 goto out; 2139 2140 val = I915_READ(CNL_DPLL_CFGCR0(id)); 2141 hw_state->cfgcr0 = val; 2142 2143 /* avoid reading back stale values if HDMI mode is not enabled */ 2144 if (val & DPLL_CFGCR0_HDMI_MODE) { 2145 hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id)); 2146 } 2147 ret = true; 2148 2149 out: 2150 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); 2151 2152 return ret; 2153 } 2154 2155 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv, 2156 int *qdiv, int *kdiv) 2157 { 2158 /* even dividers */ 2159 if (bestdiv % 2 == 0) { 2160 if (bestdiv == 2) { 2161 *pdiv = 2; 2162 *qdiv = 1; 2163 *kdiv = 1; 2164 } else if (bestdiv % 4 == 0) { 2165 *pdiv = 2; 2166 *qdiv = bestdiv / 4; 2167 *kdiv = 2; 2168 } else if (bestdiv % 6 == 0) { 2169 *pdiv = 3; 2170 *qdiv = bestdiv / 6; 2171 *kdiv = 2; 2172 } else if (bestdiv % 5 == 0) { 2173 *pdiv = 5; 2174 *qdiv = bestdiv / 10; 2175 *kdiv = 2; 2176 } else if (bestdiv % 14 == 0) { 2177 *pdiv = 7; 2178 *qdiv = bestdiv / 14; 2179 *kdiv = 2; 2180 } 2181 } else { 2182 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) { 2183 *pdiv = bestdiv; 2184 *qdiv = 1; 2185 *kdiv = 1; 2186 } else { /* 9, 15, 21 */ 2187 *pdiv = bestdiv / 3; 2188 *qdiv = 1; 2189 *kdiv = 3; 2190 } 2191 } 2192 } 2193 2194 static void cnl_wrpll_params_populate(struct skl_wrpll_params 
*params, 2195 u32 dco_freq, u32 ref_freq, 2196 int pdiv, int qdiv, int kdiv) 2197 { 2198 u32 dco; 2199 2200 switch (kdiv) { 2201 case 1: 2202 params->kdiv = 1; 2203 break; 2204 case 2: 2205 params->kdiv = 2; 2206 break; 2207 case 3: 2208 params->kdiv = 4; 2209 break; 2210 default: 2211 WARN(1, "Incorrect KDiv\n"); 2212 } 2213 2214 switch (pdiv) { 2215 case 2: 2216 params->pdiv = 1; 2217 break; 2218 case 3: 2219 params->pdiv = 2; 2220 break; 2221 case 5: 2222 params->pdiv = 4; 2223 break; 2224 case 7: 2225 params->pdiv = 8; 2226 break; 2227 default: 2228 WARN(1, "Incorrect PDiv\n"); 2229 } 2230 2231 WARN_ON(kdiv != 2 && qdiv != 1); 2232 2233 params->qdiv_ratio = qdiv; 2234 params->qdiv_mode = (qdiv == 1) ? 0 : 1; 2235 2236 dco = div_u64((u64)dco_freq << 15, ref_freq); 2237 2238 params->dco_integer = dco >> 15; 2239 params->dco_fraction = dco & 0x7fff; 2240 } 2241 2242 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv) 2243 { 2244 int ref_clock = dev_priv->cdclk.hw.ref; 2245 2246 /* 2247 * For ICL+, the spec states: if reference frequency is 38.4, 2248 * use 19.2 because the DPLL automatically divides that by 2. 
 */
	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
		ref_clock = 19200;

	return ref_clock;
}

/*
 * Try every legal post divider and keep the one whose resulting DCO
 * frequency lies closest to the middle of the valid DCO range
 * ("best centrality").
 */
static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
			struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 ref_clock;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if ((dco <= dco_max) && (dco >= dco_min)) {
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	/* no divider produced an in-range DCO */
	if (best_div == 0)
		return false;

	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);

	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);

	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return true;
}

/* Compute CFGCR0/CFGCR1 for HDMI and store them in the CRTC state. */
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0, cfgcr1;
	struct skl_wrpll_params wrpll_params = { 0, };

	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;

	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
		return false;

	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
		  wrpll_params.dco_integer;

	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
		 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
		 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
		 DPLL_CFGCR1_CENTRAL_FREQ;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	return true;
}

/* Compute CFGCR0 for DP from a fixed table of link rates. */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	/*
	 * NOTE(review): rates are keyed on port_clock / 2; an unlisted rate
	 * falls through and leaves only the SSC bit set — presumably callers
	 * only pass the rates listed here, confirm against the DP code.
	 */
	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
	/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}

/*
 * Compute the PLL state for the output type, then reserve any of
 * DPLL0..DPLL2 whose existing state matches.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
			      crtc_state->output_types);
		return false;
	}

	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     DPLL_ID_SKL_DPLL0,
				     DPLL_ID_SKL_DPLL2);
	if (!pll) {
		DRM_DEBUG_KMS("No PLL selected\n");
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}

static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: "
		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		      hw_state->cfgcr0,
		      hw_state->cfgcr1);
}

static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
};

static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = cnl_dump_hw_state,
};

/* Pre-computed combo PHY PLL parameters, keyed by link clock. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};

/*
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};


/* Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};

static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Look up the pre-computed combo PHY DP PLL parameters for the port clock. */
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				  struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	const struct icl_combo_pll_params *params =
		dev_priv->cdclk.hw.ref == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	/* both tables have the same number of entries (8) */
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return true;
		}
	}

	MISSING_CASE(clock);
	return false;
}

/* Select the canned TBT PLL parameters for the current reference clock. */
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			     struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (INTEL_GEN(dev_priv) >= 12) {
		switch (dev_priv->cdclk.hw.ref) {
		default:
			MISSING_CASE(dev_priv->cdclk.hw.ref);
			/* fall-through */
		case 19200:
		case 38400:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
		switch (dev_priv->cdclk.hw.ref) {
		default:
			MISSING_CASE(dev_priv->cdclk.hw.ref);
			/* fall-through */
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return true;
}

/* Compute CFGCR0/CFGCR1 for a combo PHY or TBT PLL on ICL+. */
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
				struct intel_encoder *encoder,
				struct intel_dpll_hw_state *pll_state)
{
	struct
drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	u32 cfgcr0, cfgcr1;
	struct skl_wrpll_params pll_params = { 0 };
	bool ret;

	/* TC PHYs take the TBT path; combo PHYs pick HDMI/DSI WRPLL or DP */
	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
							encoder->port)))
		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret)
		return false;

	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
		 pll_params.dco_integer;

	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
		 DPLL_CFGCR1_PDIV(pll_params.pdiv);

	if (INTEL_GEN(dev_priv) >= 12)
		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	memset(pll_state, 0, sizeof(*pll_state));

	pll_state->cfgcr0 = cfgcr0;
	pll_state->cfgcr1 = cfgcr1;

	return true;
}


/* MG PLL ids map 1:1 onto TC ports, starting at DPLL_ID_ICL_MGPLL1. */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return id - DPLL_ID_ICL_MGPLL1;
}

enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port + DPLL_ID_ICL_MGPLL1;
}

/*
 * Find div1/div2 such that div1 * div2 * clock_khz * 5 lands in the DCO
 * range; on success store the DCO in @target_dco_khz and fill in the
 * refclkin/clktop2 register fields of @state.
 */
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				     u32 *target_dco_khz,
				     struct intel_dpll_hw_state *state,
				     bool is_dkl)
{
	u32 dco_min_freq, dco_max_freq;
	int div1_vals[] = {7, 5, 3, 2};
	unsigned int i;
	int div2;

	/* DP allows exactly one DCO frequency; HDMI a range */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			switch (div1) {
			default:
				MISSING_CASE(div1);
				/* fall through */
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return true;
		}
	}

	return false;
}

/*
 * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation, that's why it looks so different.
 */
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				  struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int refclk_khz = dev_priv->cdclk.hw.ref;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = INTEL_GEN(dev_priv) >= 12;

	memset(pll_state, 0, sizeof(*pll_state));

	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				      pll_state, is_dkl)) {
		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
		return false;
	}

	/* feedback dividers: m2 integer part must fit in 8 bits */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		/*
		 * NOTE(review): only the pre-TGL MG PHY path retries with
		 * m1div == 4 — presumably a DKL PHY restriction, confirm.
		 */
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255) {
			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
				      clock);
			return false;
		}
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* m2 fractional part as a 22-bit fixed point fraction */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return false;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
			DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
			DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * The masks restrict which bits participate in state
		 * comparison/programming; they depend on the refclk, see
		 * the matching logic in mg_pll_get_hw_state().
		 */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return true;
}

/**
 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
 * @crtc_state: state for the CRTC to select the DPLL for
 * @port_dpll_id: the active @port_dpll_id to select
 *
 * Select the given @port_dpll_id instance from the DPLLs reserved for the
 * CRTC.
 */
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
			      enum icl_port_dpll_id port_dpll_id)
{
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[port_dpll_id];

	crtc_state->shared_dpll = port_dpll->pll;
	crtc_state->dpll_hw_state = port_dpll->hw_state;
}

/*
 * Pick the MG PHY PLL when the primary port is in DP alt-mode or legacy
 * mode, otherwise keep the default (TBT/combo) PLL active.
 */
static void icl_update_active_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_digital_port *primary_port;
	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;

	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
		enc_to_mst(&encoder->base)->primary :
		enc_to_dig_port(&encoder->base);

	if (primary_port &&
	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
	     primary_port->tc_mode == TC_PORT_LEGACY))
		port_dpll_id = ICL_PORT_DPLL_MG_PHY;

	icl_set_active_port_dpll(crtc_state, port_dpll_id);
}

/* Compute and reserve a combo PHY DPLL for the CRTC/encoder. */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	bool has_dpll4 = false;

	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");

		return false;
	}

	/* on EHL, ports other than A may also use DPLL4 */
	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
		has_dpll4 = true;

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						DPLL_ID_ICL_DPLL0,
						has_dpll4 ? DPLL_ID_EHL_DPLL4
							  : DPLL_ID_ICL_DPLL1);
	if (!port_dpll->pll) {
		DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
			      encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}

/*
 * Reserve both the TBT PLL and the port-specific MG PHY PLL for a TC port;
 * on a later failure the already-taken TBT reference is dropped again.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
		return false;
	}

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						DPLL_ID_ICL_TBTPLL,
						DPLL_ID_ICL_TBTPLL);
	if (!port_dpll->pll) {
		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_id,
						dpll_id);
	if (!port_dpll->pll) {
		DRM_DEBUG_KMS("No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}

/* Dispatch PLL reservation based on the PHY type of the encoder's port. */
static bool icl_get_dplls(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

	if (intel_phy_is_combo(dev_priv, phy))
		return icl_get_combo_phy_dpll(state, crtc, encoder);
	else if (intel_phy_is_tc(dev_priv, phy))
		return icl_get_tc_phy_dplls(state, crtc, encoder);

	MISSING_CASE(phy);

	return false;
}

/* Drop the references taken by icl_get_dplls() for this CRTC. */
static void icl_put_dplls(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum icl_port_dpll_id id;

	new_crtc_state->shared_dpll = NULL;

	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
		const struct icl_port_dpll *old_port_dpll =
			&old_crtc_state->icl_port_dplls[id];
		struct icl_port_dpll *new_port_dpll =
			&new_crtc_state->icl_port_dplls[id];

		new_port_dpll->pll = NULL;

		if (!old_port_dpll->pll)
			continue;

		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
	}
}

/* Read back the MG PHY PLL state, masking out reserved register fields. */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* must mirror the mask selection in icl_calc_mg_pll_state() */
	if (dev_priv->cdclk.hw.ref == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

/* Read back the DKL PHY PLL state, masking out reserved register fields. */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

/* Common CFGCR0/CFGCR1 readout for combo PHY and TBT PLLs. */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (INTEL_GEN(dev_priv) >= 12) {
		hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
	} else {
		/* EHL DPLL4 uses the CFGCR register pair at index 4 */
		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
				   struct intel_shared_dpll *pll,
				   struct intel_dpll_hw_state *hw_state)
{
	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);

	/* NOTE(review): EHL DPLL4 reuses MG_PLL_ENABLE(0) as its enable reg */
	if (IS_ELKHARTLAKE(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		enable_reg = MG_PLL_ENABLE(0);
	}

	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
}

static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv, 3296 struct intel_shared_dpll *pll, 3297 struct intel_dpll_hw_state *hw_state) 3298 { 3299 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE); 3300 } 3301 3302 static void icl_dpll_write(struct drm_i915_private *dev_priv, 3303 struct intel_shared_dpll *pll) 3304 { 3305 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; 3306 const enum intel_dpll_id id = pll->info->id; 3307 i915_reg_t cfgcr0_reg, cfgcr1_reg; 3308 3309 if (INTEL_GEN(dev_priv) >= 12) { 3310 cfgcr0_reg = TGL_DPLL_CFGCR0(id); 3311 cfgcr1_reg = TGL_DPLL_CFGCR1(id); 3312 } else { 3313 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { 3314 cfgcr0_reg = ICL_DPLL_CFGCR0(4); 3315 cfgcr1_reg = ICL_DPLL_CFGCR1(4); 3316 } else { 3317 cfgcr0_reg = ICL_DPLL_CFGCR0(id); 3318 cfgcr1_reg = ICL_DPLL_CFGCR1(id); 3319 } 3320 } 3321 3322 I915_WRITE(cfgcr0_reg, hw_state->cfgcr0); 3323 I915_WRITE(cfgcr1_reg, hw_state->cfgcr1); 3324 POSTING_READ(cfgcr1_reg); 3325 } 3326 3327 static void icl_mg_pll_write(struct drm_i915_private *dev_priv, 3328 struct intel_shared_dpll *pll) 3329 { 3330 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; 3331 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); 3332 u32 val; 3333 3334 /* 3335 * Some of the following registers have reserved fields, so program 3336 * these with RMW based on a mask. The mask can be fixed or generated 3337 * during the calc/readout phase if the mask depends on some other HW 3338 * state like refclk, see icl_calc_mg_pll_state(). 
3339 */ 3340 val = I915_READ(MG_REFCLKIN_CTL(tc_port)); 3341 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; 3342 val |= hw_state->mg_refclkin_ctl; 3343 I915_WRITE(MG_REFCLKIN_CTL(tc_port), val); 3344 3345 val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port)); 3346 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; 3347 val |= hw_state->mg_clktop2_coreclkctl1; 3348 I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val); 3349 3350 val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); 3351 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | 3352 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | 3353 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | 3354 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); 3355 val |= hw_state->mg_clktop2_hsclkctl; 3356 I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val); 3357 3358 I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); 3359 I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); 3360 I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf); 3361 I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock); 3362 I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); 3363 3364 val = I915_READ(MG_PLL_BIAS(tc_port)); 3365 val &= ~hw_state->mg_pll_bias_mask; 3366 val |= hw_state->mg_pll_bias; 3367 I915_WRITE(MG_PLL_BIAS(tc_port), val); 3368 3369 val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); 3370 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; 3371 val |= hw_state->mg_pll_tdc_coldst_bias; 3372 I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val); 3373 3374 POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); 3375 } 3376 3377 static void dkl_pll_write(struct drm_i915_private *dev_priv, 3378 struct intel_shared_dpll *pll) 3379 { 3380 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; 3381 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); 3382 u32 val; 3383 3384 /* 3385 * All registers programmed here have the same HIP_INDEX_REG even 3386 * though on different building block 3387 */ 3388 I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); 3389 3390 /* All the 
registers are RMW */ 3391 val = I915_READ(DKL_REFCLKIN_CTL(tc_port)); 3392 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; 3393 val |= hw_state->mg_refclkin_ctl; 3394 I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val); 3395 3396 val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port)); 3397 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; 3398 val |= hw_state->mg_clktop2_coreclkctl1; 3399 I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val); 3400 3401 val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port)); 3402 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | 3403 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | 3404 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | 3405 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); 3406 val |= hw_state->mg_clktop2_hsclkctl; 3407 I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val); 3408 3409 val = I915_READ(DKL_PLL_DIV0(tc_port)); 3410 val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK | 3411 DKL_PLL_DIV0_PROP_COEFF_MASK | 3412 DKL_PLL_DIV0_FBPREDIV_MASK | 3413 DKL_PLL_DIV0_FBDIV_INT_MASK); 3414 val |= hw_state->mg_pll_div0; 3415 I915_WRITE(DKL_PLL_DIV0(tc_port), val); 3416 3417 val = I915_READ(DKL_PLL_DIV1(tc_port)); 3418 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | 3419 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); 3420 val |= hw_state->mg_pll_div1; 3421 I915_WRITE(DKL_PLL_DIV1(tc_port), val); 3422 3423 val = I915_READ(DKL_PLL_SSC(tc_port)); 3424 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | 3425 DKL_PLL_SSC_STEP_LEN_MASK | 3426 DKL_PLL_SSC_STEP_NUM_MASK | 3427 DKL_PLL_SSC_EN); 3428 val |= hw_state->mg_pll_ssc; 3429 I915_WRITE(DKL_PLL_SSC(tc_port), val); 3430 3431 val = I915_READ(DKL_PLL_BIAS(tc_port)); 3432 val &= ~(DKL_PLL_BIAS_FRAC_EN_H | 3433 DKL_PLL_BIAS_FBDIV_FRAC_MASK); 3434 val |= hw_state->mg_pll_bias; 3435 I915_WRITE(DKL_PLL_BIAS(tc_port), val); 3436 3437 val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); 3438 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | 3439 DKL_PLL_TDC_FEED_FWD_GAIN_MASK); 3440 val |= hw_state->mg_pll_tdc_coldst_bias; 3441 I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val); 3442 3443 
POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); 3444 } 3445 3446 static void icl_pll_power_enable(struct drm_i915_private *dev_priv, 3447 struct intel_shared_dpll *pll, 3448 i915_reg_t enable_reg) 3449 { 3450 u32 val; 3451 3452 val = I915_READ(enable_reg); 3453 val |= PLL_POWER_ENABLE; 3454 I915_WRITE(enable_reg, val); 3455 3456 /* 3457 * The spec says we need to "wait" but it also says it should be 3458 * immediate. 3459 */ 3460 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1)) 3461 DRM_ERROR("PLL %d Power not enabled\n", pll->info->id); 3462 } 3463 3464 static void icl_pll_enable(struct drm_i915_private *dev_priv, 3465 struct intel_shared_dpll *pll, 3466 i915_reg_t enable_reg) 3467 { 3468 u32 val; 3469 3470 val = I915_READ(enable_reg); 3471 val |= PLL_ENABLE; 3472 I915_WRITE(enable_reg, val); 3473 3474 /* Timeout is actually 600us. */ 3475 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1)) 3476 DRM_ERROR("PLL %d not locked\n", pll->info->id); 3477 } 3478 3479 static void combo_pll_enable(struct drm_i915_private *dev_priv, 3480 struct intel_shared_dpll *pll) 3481 { 3482 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); 3483 3484 if (IS_ELKHARTLAKE(dev_priv) && 3485 pll->info->id == DPLL_ID_EHL_DPLL4) { 3486 enable_reg = MG_PLL_ENABLE(0); 3487 3488 /* 3489 * We need to disable DC states when this DPLL is enabled. 3490 * This can be done by taking a reference on DPLL4 power 3491 * domain. 3492 */ 3493 pll->wakeref = intel_display_power_get(dev_priv, 3494 POWER_DOMAIN_DPLL_DC_OFF); 3495 } 3496 3497 icl_pll_power_enable(dev_priv, pll, enable_reg); 3498 3499 icl_dpll_write(dev_priv, pll); 3500 3501 /* 3502 * DVFS pre sequence would be here, but in our driver the cdclk code 3503 * paths should already be setting the appropriate voltage, hence we do 3504 * nothing here. 3505 */ 3506 3507 icl_pll_enable(dev_priv, pll, enable_reg); 3508 3509 /* DVFS post sequence would be here. See the comment above. 
*/ 3510 } 3511 3512 static void tbt_pll_enable(struct drm_i915_private *dev_priv, 3513 struct intel_shared_dpll *pll) 3514 { 3515 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE); 3516 3517 icl_dpll_write(dev_priv, pll); 3518 3519 /* 3520 * DVFS pre sequence would be here, but in our driver the cdclk code 3521 * paths should already be setting the appropriate voltage, hence we do 3522 * nothing here. 3523 */ 3524 3525 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE); 3526 3527 /* DVFS post sequence would be here. See the comment above. */ 3528 } 3529 3530 static void mg_pll_enable(struct drm_i915_private *dev_priv, 3531 struct intel_shared_dpll *pll) 3532 { 3533 i915_reg_t enable_reg = 3534 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id)); 3535 3536 icl_pll_power_enable(dev_priv, pll, enable_reg); 3537 3538 if (INTEL_GEN(dev_priv) >= 12) 3539 dkl_pll_write(dev_priv, pll); 3540 else 3541 icl_mg_pll_write(dev_priv, pll); 3542 3543 /* 3544 * DVFS pre sequence would be here, but in our driver the cdclk code 3545 * paths should already be setting the appropriate voltage, hence we do 3546 * nothing here. 3547 */ 3548 3549 icl_pll_enable(dev_priv, pll, enable_reg); 3550 3551 /* DVFS post sequence would be here. See the comment above. */ 3552 } 3553 3554 static void icl_pll_disable(struct drm_i915_private *dev_priv, 3555 struct intel_shared_dpll *pll, 3556 i915_reg_t enable_reg) 3557 { 3558 u32 val; 3559 3560 /* The first steps are done by intel_ddi_post_disable(). */ 3561 3562 /* 3563 * DVFS pre sequence would be here, but in our driver the cdclk code 3564 * paths should already be setting the appropriate voltage, hence we do 3565 * nothign here. 3566 */ 3567 3568 val = I915_READ(enable_reg); 3569 val &= ~PLL_ENABLE; 3570 I915_WRITE(enable_reg, val); 3571 3572 /* Timeout is actually 1us. */ 3573 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1)) 3574 DRM_ERROR("PLL %d locked\n", pll->info->id); 3575 3576 /* DVFS post sequence would be here. 
See the comment above. */ 3577 3578 val = I915_READ(enable_reg); 3579 val &= ~PLL_POWER_ENABLE; 3580 I915_WRITE(enable_reg, val); 3581 3582 /* 3583 * The spec says we need to "wait" but it also says it should be 3584 * immediate. 3585 */ 3586 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1)) 3587 DRM_ERROR("PLL %d Power not disabled\n", pll->info->id); 3588 } 3589 3590 static void combo_pll_disable(struct drm_i915_private *dev_priv, 3591 struct intel_shared_dpll *pll) 3592 { 3593 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); 3594 3595 if (IS_ELKHARTLAKE(dev_priv) && 3596 pll->info->id == DPLL_ID_EHL_DPLL4) { 3597 enable_reg = MG_PLL_ENABLE(0); 3598 icl_pll_disable(dev_priv, pll, enable_reg); 3599 3600 intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, 3601 pll->wakeref); 3602 return; 3603 } 3604 3605 icl_pll_disable(dev_priv, pll, enable_reg); 3606 } 3607 3608 static void tbt_pll_disable(struct drm_i915_private *dev_priv, 3609 struct intel_shared_dpll *pll) 3610 { 3611 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE); 3612 } 3613 3614 static void mg_pll_disable(struct drm_i915_private *dev_priv, 3615 struct intel_shared_dpll *pll) 3616 { 3617 i915_reg_t enable_reg = 3618 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id)); 3619 3620 icl_pll_disable(dev_priv, pll, enable_reg); 3621 } 3622 3623 static void icl_dump_hw_state(struct drm_i915_private *dev_priv, 3624 const struct intel_dpll_hw_state *hw_state) 3625 { 3626 DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, " 3627 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, " 3628 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " 3629 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, " 3630 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, " 3631 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n", 3632 hw_state->cfgcr0, hw_state->cfgcr1, 3633 hw_state->mg_refclkin_ctl, 3634 hw_state->mg_clktop2_coreclkctl1, 3635 hw_state->mg_clktop2_hsclkctl, 3636 hw_state->mg_pll_div0, 3637 
hw_state->mg_pll_div1, 3638 hw_state->mg_pll_lf, 3639 hw_state->mg_pll_frac_lock, 3640 hw_state->mg_pll_ssc, 3641 hw_state->mg_pll_bias, 3642 hw_state->mg_pll_tdc_coldst_bias); 3643 } 3644 3645 static const struct intel_shared_dpll_funcs combo_pll_funcs = { 3646 .enable = combo_pll_enable, 3647 .disable = combo_pll_disable, 3648 .get_hw_state = combo_pll_get_hw_state, 3649 }; 3650 3651 static const struct intel_shared_dpll_funcs tbt_pll_funcs = { 3652 .enable = tbt_pll_enable, 3653 .disable = tbt_pll_disable, 3654 .get_hw_state = tbt_pll_get_hw_state, 3655 }; 3656 3657 static const struct intel_shared_dpll_funcs mg_pll_funcs = { 3658 .enable = mg_pll_enable, 3659 .disable = mg_pll_disable, 3660 .get_hw_state = mg_pll_get_hw_state, 3661 }; 3662 3663 static const struct dpll_info icl_plls[] = { 3664 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 3665 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 3666 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, 3667 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, 3668 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, 3669 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, 3670 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, 3671 { }, 3672 }; 3673 3674 static const struct intel_dpll_mgr icl_pll_mgr = { 3675 .dpll_info = icl_plls, 3676 .get_dplls = icl_get_dplls, 3677 .put_dplls = icl_put_dplls, 3678 .update_active_dpll = icl_update_active_dpll, 3679 .dump_hw_state = icl_dump_hw_state, 3680 }; 3681 3682 static const struct dpll_info ehl_plls[] = { 3683 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 3684 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 3685 { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, 3686 { }, 3687 }; 3688 3689 static const struct intel_dpll_mgr ehl_pll_mgr = { 3690 .dpll_info = ehl_plls, 3691 .get_dplls = icl_get_dplls, 3692 .put_dplls = icl_put_dplls, 3693 .dump_hw_state = icl_dump_hw_state, 3694 }; 3695 3696 static const struct intel_shared_dpll_funcs 
dkl_pll_funcs = { 3697 .enable = mg_pll_enable, 3698 .disable = mg_pll_disable, 3699 .get_hw_state = dkl_pll_get_hw_state, 3700 }; 3701 3702 static const struct dpll_info tgl_plls[] = { 3703 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 3704 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 3705 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, 3706 { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, 3707 { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, 3708 { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, 3709 { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, 3710 { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 }, 3711 { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 }, 3712 { }, 3713 }; 3714 3715 static const struct intel_dpll_mgr tgl_pll_mgr = { 3716 .dpll_info = tgl_plls, 3717 .get_dplls = icl_get_dplls, 3718 .put_dplls = icl_put_dplls, 3719 .update_active_dpll = icl_update_active_dpll, 3720 .dump_hw_state = icl_dump_hw_state, 3721 }; 3722 3723 /** 3724 * intel_shared_dpll_init - Initialize shared DPLLs 3725 * @dev: drm device 3726 * 3727 * Initialize shared DPLLs for @dev. 
3728 */ 3729 void intel_shared_dpll_init(struct drm_device *dev) 3730 { 3731 struct drm_i915_private *dev_priv = to_i915(dev); 3732 const struct intel_dpll_mgr *dpll_mgr = NULL; 3733 const struct dpll_info *dpll_info; 3734 int i; 3735 3736 if (INTEL_GEN(dev_priv) >= 12) 3737 dpll_mgr = &tgl_pll_mgr; 3738 else if (IS_ELKHARTLAKE(dev_priv)) 3739 dpll_mgr = &ehl_pll_mgr; 3740 else if (INTEL_GEN(dev_priv) >= 11) 3741 dpll_mgr = &icl_pll_mgr; 3742 else if (IS_CANNONLAKE(dev_priv)) 3743 dpll_mgr = &cnl_pll_mgr; 3744 else if (IS_GEN9_BC(dev_priv)) 3745 dpll_mgr = &skl_pll_mgr; 3746 else if (IS_GEN9_LP(dev_priv)) 3747 dpll_mgr = &bxt_pll_mgr; 3748 else if (HAS_DDI(dev_priv)) 3749 dpll_mgr = &hsw_pll_mgr; 3750 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 3751 dpll_mgr = &pch_pll_mgr; 3752 3753 if (!dpll_mgr) { 3754 dev_priv->num_shared_dpll = 0; 3755 return; 3756 } 3757 3758 dpll_info = dpll_mgr->dpll_info; 3759 3760 for (i = 0; dpll_info[i].name; i++) { 3761 WARN_ON(i != dpll_info[i].id); 3762 dev_priv->shared_dplls[i].info = &dpll_info[i]; 3763 } 3764 3765 dev_priv->dpll_mgr = dpll_mgr; 3766 dev_priv->num_shared_dpll = i; 3767 mutex_init(&dev_priv->dpll_lock); 3768 3769 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 3770 } 3771 3772 /** 3773 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination 3774 * @state: atomic state 3775 * @crtc: CRTC to reserve DPLLs for 3776 * @encoder: encoder 3777 * 3778 * This function reserves all required DPLLs for the given CRTC and encoder 3779 * combination in the current atomic commit @state and the new @crtc atomic 3780 * state. 3781 * 3782 * The new configuration in the atomic commit @state is made effective by 3783 * calling intel_shared_dpll_swap_state(). 3784 * 3785 * The reserved DPLLs should be released by calling 3786 * intel_release_shared_dplls(). 3787 * 3788 * Returns: 3789 * True if all required DPLLs were successfully reserved. 
3790 */ 3791 bool intel_reserve_shared_dplls(struct intel_atomic_state *state, 3792 struct intel_crtc *crtc, 3793 struct intel_encoder *encoder) 3794 { 3795 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 3796 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; 3797 3798 if (WARN_ON(!dpll_mgr)) 3799 return false; 3800 3801 return dpll_mgr->get_dplls(state, crtc, encoder); 3802 } 3803 3804 /** 3805 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state 3806 * @state: atomic state 3807 * @crtc: crtc from which the DPLLs are to be released 3808 * 3809 * This function releases all DPLLs reserved by intel_reserve_shared_dplls() 3810 * from the current atomic commit @state and the old @crtc atomic state. 3811 * 3812 * The new configuration in the atomic commit @state is made effective by 3813 * calling intel_shared_dpll_swap_state(). 3814 */ 3815 void intel_release_shared_dplls(struct intel_atomic_state *state, 3816 struct intel_crtc *crtc) 3817 { 3818 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 3819 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; 3820 3821 /* 3822 * FIXME: this function is called for every platform having a 3823 * compute_clock hook, even though the platform doesn't yet support 3824 * the shared DPLL framework and intel_reserve_shared_dplls() is not 3825 * called on those. 3826 */ 3827 if (!dpll_mgr) 3828 return; 3829 3830 dpll_mgr->put_dplls(state, crtc); 3831 } 3832 3833 /** 3834 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder 3835 * @state: atomic state 3836 * @crtc: the CRTC for which to update the active DPLL 3837 * @encoder: encoder determining the type of port DPLL 3838 * 3839 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state, 3840 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The 3841 * DPLL selected will be based on the current mode of the encoder's port. 
3842 */ 3843 void intel_update_active_dpll(struct intel_atomic_state *state, 3844 struct intel_crtc *crtc, 3845 struct intel_encoder *encoder) 3846 { 3847 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3848 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; 3849 3850 if (WARN_ON(!dpll_mgr)) 3851 return; 3852 3853 dpll_mgr->update_active_dpll(state, crtc, encoder); 3854 } 3855 3856 /** 3857 * intel_shared_dpll_dump_hw_state - write hw_state to dmesg 3858 * @dev_priv: i915 drm device 3859 * @hw_state: hw state to be written to the log 3860 * 3861 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS. 3862 */ 3863 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, 3864 const struct intel_dpll_hw_state *hw_state) 3865 { 3866 if (dev_priv->dpll_mgr) { 3867 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state); 3868 } else { 3869 /* fallback for platforms that don't use the shared dpll 3870 * infrastructure 3871 */ 3872 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " 3873 "fp0: 0x%x, fp1: 0x%x\n", 3874 hw_state->dpll, 3875 hw_state->dpll_md, 3876 hw_state->fp0, 3877 hw_state->fp1); 3878 } 3879 } 3880